/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm,
 * and for their DMA addresses.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}

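/*
 * Release the pages of a TTM_PAGE_FLAG_USER ttm that were pinned
 * with get_user_pages(): mark written pages dirty (unless reserved),
 * give the accounted memory back, and drop the page references.
 */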
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

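/*
 * Return the page backing @index, allocating one from the page pool
 * and charging the global memory accounting if none is present yet.
 * Highmem pages fill the page directory from the top, lowmem pages
 * from the bottom.
 */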
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

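/*
 * As __ttm_tt_get_page(), but swap the ttm back in first if it was
 * swapped out.
 */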
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

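/*
 * Allocate backing pages for the whole ttm, swapping in first when
 * needed, and hand the page array over to the backend. On success
 * the ttm is left in the tt_unbound state.
 */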
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page, ttm->dma_address);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

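/*
 * Switch a single lowmem page between the cached, write-combined and
 * uncached states, transitioning through writeback first so the old
 * memtype is released. Highmem pages are not in the kernel linear
 * map and need no change. This only does real work on x86; other
 * architectures get a no-op.
 */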
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

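/*
 * Translate a TTM_PL_FLAG_* placement mask into a caching state and
 * apply it. TTM_PL_FLAG_WC takes precedence over TTM_PL_FLAG_UNCACHED;
 * anything else maps to tt_cached. A typical (hypothetical) call site
 * would be a driver move path passing bo->mem.placement.
 */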
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

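/*
 * Hand every allocated page back to the page pool, warning about
 * pages that still have extra references, and reset the ttm to the
 * unpopulated state.
 */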
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
		      ttm->dma_address);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

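/*
 * Tear down a ttm: destroy the backend, free user or pool-allocated
 * pages along with the page directory, and drop any non-persistent
 * swap storage.
 */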
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

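/*
 * Pin @num_pages pages of @tsk's user memory starting at @start for a
 * TTM_PAGE_FLAG_USER ttm, accounting them as lowmem. A short pin is
 * fatal for writable ttms; for read-only ttms missing pages are
 * presumably covered by the dummy read page.
 */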
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

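/*
 * Allocate and initialize a ttm covering @size bytes rounded up to
 * whole pages. The ttm starts out cached and unpopulated, with the
 * backend supplied by the driver's create_ttm_backend_entry() hook.
 */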
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

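/*
 * Unbind the ttm from its backend if it is currently bound. A failed
 * backend unbind is treated as fatal.
 */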
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

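/*
 * Populate the ttm if necessary and bind it through the backend at
 * the location described by @bo_mem. User ttms become dirty on bind,
 * presumably because the device may write through the new binding.
 */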
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

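/*
 * Bring a swapped-out ttm back in. User ttms are re-pinned via
 * ttm_tt_set_user(); otherwise every page is copied from the shmem
 * swap storage into newly allocated pages, and the swap file is
 * dropped unless it is persistent.
 */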
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

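/*
 * Swap out a cached, unbound ttm: copy each page either into
 * @persistent_swap_storage or into an anonymous shmem file created
 * here, then release the original pages.
 */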
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}