/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}

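/**
 * Frees the page and DMA-address arrays set up by
 * ttm_tt_alloc_page_directory().
 */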
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}

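/**
 * Releases pages pinned with get_user_pages(), marking them dirty when
 * the ttm was mapped for write, skipping the dummy read page, and
 * returning the ttm to the tt_unpopulated state.
 */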
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

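/**
 * Allocates and accounts pages until the slot at @index is populated.
 * Highmem pages are filled in from the top of the page array, lowmem
 * pages from the bottom. Returns NULL on allocation or accounting
 * failure.
 */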
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

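/**
 * Returns the page backing the ttm at @index, swapping the ttm back in
 * first if its contents are currently in swap storage.
 */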
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

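/**
 * Allocates all pages backing the ttm, swapping in if needed, and hands
 * the completed page array to the backend. On success the ttm is moved
 * to the tt_unbound state.
 */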
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page, ttm->dma_address);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

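/*
 * On x86, move a single lowmem page between caching states using the
 * set_memory_*()/set_pages_*() interfaces; highmem pages are left
 * untouched since they are not in the linear kernel map.
 */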
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

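/**
 * Translates TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED placement flags into
 * a caching state and applies it to the ttm.
 */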
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

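/**
 * Returns all driver-allocated pages to the page allocator,
 * unaccounting them and warning about pages that are still referenced
 * elsewhere, then resets the ttm to the tt_unpopulated state.
 */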
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
		      ttm->dma_address);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

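/**
 * Tears down a ttm: destroys the backend, releases user or
 * driver-allocated pages, frees the page directory and drops
 * non-persistent swap storage.
 */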
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

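/**
 * Pins @num_pages of user memory starting at @start in @tsk's address
 * space as the backing store of the ttm. The pages are accounted as
 * lowmem against the global memory limit before being pinned.
 */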
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

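/**
 * Allocates a ttm_tt large enough to back @size bytes together with its
 * driver backend, initialized to the tt_unpopulated, cached state.
 */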
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

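/**
 * Unbinds the ttm through the backend if it is currently bound, moving
 * it back to the tt_unbound state.
 */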
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

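/**
 * Populates the ttm if necessary and binds it to @bo_mem through the
 * backend. User-backed ttms are marked dirty since the device may
 * write to them once bound.
 */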
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

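/**
 * Restores the ttm's contents from swap storage, copying each shmem
 * page into a newly allocated ttm page. User ttms are simply re-pinned
 * via ttm_tt_set_user().
 */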
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

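/**
 * Copies the ttm's pages to shmem-backed swap storage and frees them.
 * A private shmem file is created unless the caller supplies
 * @persistent_swap_storage, in which case that file is used and kept
 * across swapins.
 */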
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}