/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif

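/**
 * Flush CPU caches for an array of pages before their caching
 * attributes are changed: clflush each cache line on x86, flush the
 * kernel linear-map range on powerpc, and on other architectures
 * issue an empty cross-CPU call, warning if it times out.
 */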
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		if (pages[i]) {
			unsigned long start = (unsigned long)page_address(pages[i]);
			flush_dcache_range(start, start + PAGE_SIZE);
		}
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	return alloc_page(GFP_HIGHUSER);
}

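/**
 * Release pages obtained with get_user_pages(), dirtying any pages
 * that may have been written through a GPU mapping, and return the
 * accounted memory to the global memory accounting.
 */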
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

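/**
 * Allocate and account a single page for the given index, filling
 * highmem pages in from the top of the page array and lowmem pages
 * from the bottom, until the requested index is populated.
 */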
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

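/**
 * Allocate and account backing pages for the whole ttm, swapping it
 * in first if necessary, then hand the pages over to the backend.
 */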
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

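/**
 * Choose a caching state from the TTM placement flags and apply it
 * to the ttm.
 */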
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

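/**
 * Return all driver-allocated pages: switch them back to cached,
 * release the memory accounting and free the pages themselves.
 */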
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

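/**
 * Tear down a ttm: destroy the backend, free or unpin the backing
 * pages, free the page directory and drop any non-persistent swap
 * storage.
 */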
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

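/**
 * Populate the ttm with pages pinned from a user-space address range
 * using get_user_pages(), accounting them against the global memory
 * accounting first.
 */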
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/*
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

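/**
 * Allocate and initialize a ttm_tt for a buffer of @size bytes,
 * including its page directory and driver backend.
 */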
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

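/**
 * Populate the ttm if necessary and bind it to the given memory
 * region through the backend. User ttms are marked dirty once bound.
 */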
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

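/**
 * Bring a swapped-out ttm back in: re-pin user pages, or copy each
 * page from the shmem swap storage into freshly allocated ttm pages.
 */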
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL)) {
			/* Drop the reference taken by read_mapping_page(). */
			page_cache_release(from_page);
			goto out_err;
		}

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

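/**
 * Swap out a ttm: unpin user pages, or copy the ttm's pages to shmem
 * swap storage (either the given persistent storage or a newly
 * created shmem file) and free the ttm pages.
 */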
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR TTM_PFX
			       "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		/* read_mapping_page() returns an ERR_PTR on failure. */
		to_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(to_page))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}