/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, so to
 * make sure things don't go berserk you have to access this pointer
 * only while holding the global lru lock, and you must reset the
 * pointer to NULL whenever you free a node.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

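/*
 * Map a placement flag mask to the index of the first memory type it
 * names, or return -EINVAL if no memory-type bit is set.
 */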
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
		man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return ret;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

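/*
 * Add the bo to its memory type's LRU list and, if it has backing pages,
 * to the global swap LRU. Called with the bo reserved. Pinned bos
 * (TTM_PL_FLAG_NO_EVICT) stay off the lists. Each list membership holds
 * a list_kref reference.
 */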
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRUs here.
	 */

	return put_count;
}

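/*
 * Reserve (lock) a bo with the lru_lock held, dropping and retaking the
 * lock while waiting. When @use_sequence is set and the bo is already
 * reserved under an equal or older sequence number, -EAGAIN is returned
 * so the caller can back off and retry, which avoids deadlocks when
 * several buffers are reserved as part of one sequence.
 */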
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

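/*
 * As ttm_bo_reserve_locked(), but takes the lru_lock itself and, on
 * success, removes the bo from the LRU lists, dropping the corresponding
 * list references outside the lock.
 */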
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
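		/* fall through: a device bo gets its ttm created like a kernel bo */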
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

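/*
 * Move the bo's backing storage to @mem: unmap CPU mappings when the
 * move changes caching or PCI aperture state, create and bind a ttm when
 * the new memory type needs one, then hand off to ttm_bo_move_ttm(), the
 * driver's move hook, or the memcpy fallback, and finally refresh the
 * bo's GPU offset.
 */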
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}

	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call with bo::reserved held and with the lru lock held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver-specific hooks.
 * Will release the bo::reserved lock and the
 * lru lock on exit.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	if (bo->ttm) {

		/**
		 * Release the lru_lock, since we don't want to have
		 * an atomic requirement on ttm_tt[unbind|destroy].
		 */

		spin_unlock(&glob->lru_lock);
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
		spin_lock(&glob->lru_lock);
	}

	if (bo->mem.mm_node) {
		ttm_bo_mem_put(bo, &bo->mem);
	}

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}


/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
retry:
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);

		/**
		 * Someone else has the object reserved. Bail and retry.
		 */

		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			goto requeue;
		}

		/**
		 * We can re-check for sync object without taking
		 * the bo::lock since setting the sync object also
		 * requires bo::reserved. A busy object at this point
		 * may be caused by another thread starting an
		 * accelerated eviction.
		 */

		if (unlikely(bo->sync_obj)) {
			atomic_set(&bo->reserved, 0);
			wake_up_all(&bo->event_queue);
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			if (remove_all)
				goto retry;
			else
				goto requeue;
		}

		put_count = ttm_bo_del_from_lru(bo);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}

		ttm_bo_cleanup_memtype_use(bo);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}
requeue:
	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

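/*
 * Evict a reserved bo: wait for the GPU to finish with it, ask the
 * driver for its eviction placements, find space there, and move the
 * buffer. On success the bo is marked evicted, so caches are
 * invalidated the next time it is moved.
 */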
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved = false;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

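/*
 * Evict the first bo on @mem_type's LRU list: take a list reference,
 * reserve the bo (retrying if someone else holds the reservation),
 * remove it from the LRU lists, and call ttm_bo_evict() on it.
 */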
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_reserve,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		spin_lock(&glob->lru_lock);
		if (list_empty(&man->lru)) {
			spin_unlock(&glob->lru_lock);
			break;
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait_reserve, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

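/*
 * Check whether @proposed_placement is allowed in the memory type
 * described by @man. On success, the flags actually usable (the memory
 * type bit plus the supported caching bits) are returned through
 * @masked_placement.
 */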
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the memory placement flags to the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the memory placement flags to the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_reserve,
						no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

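/*
 * Check whether the bo's current memory region already satisfies
 * @placement: the region must lie inside the requested page range, and
 * some placement entry must match both the caching and the memory-type
 * flags. Returns the index of the matching entry, or -1 if a move is
 * needed.
 */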
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags to the active
		 * flags.
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small. "
				"Need %lu pages, range is [%u, %u]\n",
				bo->mem.num_pages, placement->fpfn,
				placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved = false;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

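/*
 * Worst-case memory-accounting size of a bo with @num_pages pages: the
 * global per-bo footprint plus two page-aligned pointer arrays.
 */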
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

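/*
 * Evict everything on @mem_type's LRU list, one buffer at a time. When
 * @allow_errors is false, eviction failures are only logged so takedown
 * can proceed.
 */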
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printk(KERN_ERR TTM_PFX
					"Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
			"memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
			"Illegal memory manager memory type %u.\n",
			mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
			"Memory type %u has not been initialized.\n",
			mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
			"Memory manager already initialized for type %d\n",
			type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
				"Zero size memory manager type %d\n",
				type);
			return ret;
		}

		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

Dave Airlieba4420c2010-03-09 10:56:52 +10001405void ttm_bo_global_release(struct drm_global_reference *ref)
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001406{
1407 struct ttm_bo_global *glob = ref->object;
1408
1409 kobject_del(&glob->kobj);
1410 kobject_put(&glob->kobj);
1411}
1412EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(),
		"buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
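
/*
 * Example (sketch): drivers do not call ttm_bo_global_init() directly; they
 * register it through a drm_global_reference so the global object is shared
 * between devices. Roughly, assuming a driver-private
 * struct ttm_bo_global_ref bo_ref and an already-referenced memory global
 * "mem_glob":
 *
 *	bo_ref.mem_glob = mem_glob;
 *	bo_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
 *	bo_ref.ref.size = sizeof(struct ttm_bo_global);
 *	bo_ref.ref.init = &ttm_bo_global_init;
 *	bo_ref.ref.release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(&bo_ref.ref);
 */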

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
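
/*
 * Example (sketch): typical device setup at driver load. The starting offset
 * of the mmap address space is conventionally a driver-defined constant
 * (often named DRM_FILE_PAGE_OFFSET); "my_priv" and "my_bo_driver" are
 * hypothetical:
 *
 *	ret = ttm_bo_device_init(&my_priv->bdev, glob, &my_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET, my_priv->need_dma32);
 */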

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001591
1592static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1593{
1594 struct ttm_bo_device *bdev = bo->bdev;
1595 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1596 struct rb_node *parent = NULL;
1597 struct ttm_buffer_object *cur_bo;
1598 unsigned long offset = bo->vm_node->start;
1599 unsigned long cur_offset;
1600
1601 while (*cur) {
1602 parent = *cur;
1603 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1604 cur_offset = cur_bo->vm_node->start;
1605 if (offset < cur_offset)
1606 cur = &parent->rb_left;
1607 else if (offset > cur_offset)
1608 cur = &parent->rb_right;
1609 else
1610 BUG();
1611 }
1612
1613 rb_link_node(&bo->vm_rb, parent, cur);
1614 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1615}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
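
/*
 * Example (sketch): the addr_space_offset computed above is the offset user
 * space passes to mmap() on the device node to map the object. Hypothetical
 * user-space usage, assuming the driver has reported the offset through one
 * of its ioctls as "bo_mmap_offset":
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo_mmap_offset);
 */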

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
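
/*
 * Example (sketch): ttm_bo_wait() must be entered with bo->lock held; the
 * lock is dropped and re-acquired internally around driver fence waits.
 * The typical idle-wait pattern (also used by ttm_bo_synccpu_write_grab()
 * below) is:
 *
 *	spin_lock(&bo->lock);
 *	ret = ttm_bo_wait(bo, false, true, false);
 *	spin_unlock(&bo->lock);
 */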

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
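
/*
 * Example (sketch): CPU access is bracketed by the grab/release pair so the
 * buffer is idle before the write and the cpu_writers count protects it
 * while mapped:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *	... write to the buffer through a kernel or user mapping ...
 *	ttm_bo_synccpu_write_release(bo);
 */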

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the ttm_bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
		container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
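
/*
 * Example (sketch): ttm_bo_swapout_all() lets a driver push every swappable
 * buffer out to shmem in one go, e.g. from a power-management hook before
 * hibernation. Hypothetical driver code, assuming a private struct holding
 * the ttm_bo_device:
 *
 *	static int my_driver_prepare_hibernate(struct my_device *dev)
 *	{
 *		ttm_bo_swapout_all(&dev->bdev);
 *		return 0;
 *	}
 */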