/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}

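/*
 * Allocate a single page for a ttm, honoring the TTM_PAGE_FLAG_ZERO_ALLOC
 * and TTM_PAGE_FLAG_DMA32 page flags; non-DMA32 pages may come from highmem.
 */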
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

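/*
 * Release pages pinned with get_user_pages(). Pages that may have been
 * written while mapped are marked dirty before being unpinned, and the
 * global memory accounting is adjusted for each released page.
 */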
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

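/*
 * Demand-fill the page directory until the page at @index is present.
 * Highmem pages fill the directory from the top, lowmem pages from the
 * bottom, keeping the two page types grouped.
 */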
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

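/*
 * Return the page at @index, first swapping the ttm back in if its
 * contents currently live in swap storage.
 */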
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

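/*
 * Make sure all pages backing the ttm are present and hand the page
 * array to the backend.
 */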
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

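/*
 * Change the kernel-map caching attribute of a single lowmem page.
 * A page that isn't in the default (cached) state is moved to
 * write-back first so its current memtype is released. Highmem pages
 * have no fixed kernel mapping and need no transition; on non-x86
 * this is a no-op.
 */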
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

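/*
 * Translate the caching bits of a placement (TTM_PL_FLAG_WC /
 * TTM_PL_FLAG_UNCACHED) into a ttm_caching_state and apply it.
 */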
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

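/*
 * Free driver-allocated pages: restore the default caching state, then
 * unaccount and release each page. A page with an unexpected reference
 * count is reported, since it will outlive the ttm.
 */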
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

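/*
 * Final teardown of a ttm: destroy the backend, release user or
 * driver-allocated pages, free the page directory, and drop any
 * non-persistent swap storage.
 */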
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

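/*
 * Populate the ttm with pinned user pages starting at @start. The pages
 * are charged to the global memory accounting before being pinned, and
 * a short pin of a write-mapped range is treated as an error.
 */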
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

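/*
 * Allocate and initialize a ttm for @size bytes. The page directory and
 * the driver backend are set up here; the pages themselves are allocated
 * on demand later.
 */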
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

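/*
 * Unbind the ttm from the backend, if bound, leaving its pages populated.
 */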
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

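/*
 * Populate the ttm if needed and bind it to the location described by
 * @bo_mem. A user ttm is flagged dirty here, since the device may write
 * to it while bound.
 */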
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

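/*
 * Bring a swapped-out ttm back in: user ttms are simply re-pinned,
 * while driver ttms are copied back page by page from the shmem swap
 * object, which is then released unless it is persistent.
 */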
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

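/*
 * Swap out the ttm contents, either to the supplied persistent storage
 * or to a freshly set up shmem object, and free the backing pages.
 * User pages are only unpinned; their VMAs keep the contents alive.
 */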
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}