/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}

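/**
 * Frees the page-pointer array again. The backing pages themselves must
 * already have been released by the caller.
 */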
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}

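/*
 * Release pages that were pinned from user space with get_user_pages().
 * Pages are written back with set_page_dirty_lock() when the ttm was
 * mapped for write and has been dirtied, and the per-page memory
 * accounting is returned to the global accounting object.
 */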
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

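/*
 * Allocate (if necessary) and return the page backing the ttm at @index.
 * New pages come from the ttm page-allocation pool and are charged
 * against the global memory accounting object; highmem pages are filled
 * in from the top of the page array and lowmem pages from the bottom.
 */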
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

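/*
 * As __ttm_tt_get_page(), but swap the ttm back in first if its pages
 * are currently swapped out.
 */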
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

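/*
 * Make sure all pages backing the ttm are present and hand them to the
 * backend with populate(). A swapped-out ttm is swapped in first. On
 * success the ttm moves to the tt_unbound state.
 */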
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

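/*
 * Set the linear kernel-map caching attribute of a single lowmem page.
 * On x86 the page is first returned to write-back to release its current
 * memtype before the new write-combined or uncached attribute is set;
 * on other architectures this is a no-op.
 */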
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

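/*
 * Translate TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED placement flags into
 * a ttm_caching_state and apply it to the ttm.
 */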
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

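/*
 * Return all driver-allocated pages backing the ttm to the page pool,
 * undoing the global memory accounting for each page. A warning is
 * printed if a page still holds extra references.
 */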
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

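/*
 * Tear down a ttm: destroy the backend, release user or driver pages,
 * free the page directory, and drop any non-persistent swap storage.
 */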
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

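/*
 * Pin @num_pages of user memory starting at @start in @tsk's address
 * space into the ttm. The pages are accounted as lowmem against the
 * global accounting object, and a short pin of a writable range is
 * treated as -ENOMEM.
 */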
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

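/*
 * Allocate and initialize a ttm for @size bytes: set up the page
 * directory and the driver's backend. Returns NULL on failure; the ttm
 * starts out in the tt_unpopulated state with cached pages.
 */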
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

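/*
 * Unbind the ttm from the backend (e.g. from a GART-like aperture),
 * moving a bound ttm back to the tt_unbound state.
 */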
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

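/*
 * Populate the ttm if necessary and bind it to the memory region
 * described by @bo_mem through the backend's bind() hook. User ttms are
 * flagged dirty once bound, presumably since the device may write to
 * them from that point on.
 */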
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

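/*
 * Bring a swapped-out ttm back in: user ttms are simply re-pinned with
 * ttm_tt_set_user(), while driver ttms copy each page back from the
 * shmem swap storage, which is then released unless it is persistent.
 */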
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200583}