/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* Simple list-based uncached page pool.
 * - Pool collects recently freed pages for reuse.
 * - Uses page->lru to keep a list of free pages.
 * - Doesn't track pages that are currently in use.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/atomic.h>
#include <asm/agp.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"


#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
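
/*
 * Worked example (illustrative note, not part of the original file): with the
 * common 4096-byte PAGE_SIZE and 8-byte pointers on a 64-bit build,
 * NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512, so pool refills and frees
 * are batched in chunks of up to 512 pages and the temporary page-pointer
 * array (NUM_PAGES_TO_ALLOC * sizeof(struct page *)) fills exactly one page.
 */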

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name shown in debugfs output.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	int			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code so it doesn't need locking.
 *
 * @mm_shrink: Shrinker callback registered with the mm so that the pools can
 * be shrunk under memory pressure.
 * @page_alloc_inited: Reference counting for pool allocation.
 * @options: Allocator limits: refill size, maximum pool size and the
 * threshold below which an allocation counts as small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct shrinker		mm_shrink;
	atomic_t		page_alloc_inited;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};

static struct ttm_pool_manager _manager = {
	.page_alloc_inited	= ATOMIC_INIT(0)
};

#ifdef CONFIG_X86
/* TODO: add this to x86 like _uc, this version here is inefficient */
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		set_memory_wc((unsigned long)page_address(pages[i]), 1);
	return 0;
}
#else
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager.pools[pool_index];
}
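
/*
 * Pool selection at a glance (illustrative summary derived from
 * ttm_get_pool() above, not part of the original file):
 *
 *	cstate		TTM_PAGE_FLAG_DMA32	pool_index	pool
 *	tt_wc		no			0x0		wc_pool
 *	tt_uncached	no			0x1		uc_pool
 *	tt_wc		yes			0x2		wc_pool_dma32
 *	tt_uncached	yes			0x3		uc_pool_dma32
 *	tt_cached	-			-		NULL (no pool)
 */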

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * the following context is inside the spinlock while
			 * we are outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
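
/*
 * Usage sketch (illustrative, based on the callers in this file): the return
 * value is the number of pages the caller still wants freed, so a shrinker
 * can carry the remainder over to the next pool, and teardown can simply
 * pass FREE_ALL_PAGES:
 *
 *	shrink_pages = ttm_page_pool_free(pool, nr_free);	(round-robin shrink)
 *	ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);	(pool teardown)
 */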

/* Get a good estimate of how many pages are free in the pools. */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager.pools[i].npages;

	return total;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pools */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
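
/*
 * Note (illustrative, assuming the shrinker interface of this kernel
 * generation): the core mm first calls ->shrink(0, gfp_mask) just to query
 * how many objects are reclaimable, then calls it again with a non-zero
 * nr_to_scan. ttm_pool_mm_shrink() matches that contract: with
 * shrink_pages == 0 the loop exits immediately and only the current pool
 * page count is reported.
 */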

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
					cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
					cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Any pages that
 * have already changed their caching state are left on the list so they can
 * be put back into the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			printk(KERN_ERR "[ttm] unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array, cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never include dma32, so
		 * we should be fine in that case.
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
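
/*
 * Caller pattern (illustrative sketch, mirroring ttm_page_pool_fill_locked()
 * below): build an empty list, ask for a batch of pages, and splice the
 * result wherever it is needed. The pool lock must not be held across the
 * call because changing caching attributes cannot be done in irqsave context.
 *
 *	struct list_head new_pages;
 *
 *	INIT_LIST_HEAD(&new_pages);
 *	r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
 *				cstate, alloc_size);
 *	if (!r)
 *		list_splice(&new_pages, &pool->list);
 */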

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool, we fill the pool first. */
	if (count < _manager.options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager.options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct list_head *pages, int ttm_flags,
		enum ttm_caching_state cstate, unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from the head or the tail of the list, whichever is closer,
	 * to reduce the search space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
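
/*
 * Illustrative call (mirroring ttm_get_pages() below): the return value is
 * how many pages the pool could not provide, so the caller only has to
 * allocate the remainder fresh.
 *
 *	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
 *	if (count > 0)
 *		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
 */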

/*
 * On success the pages list will hold 'count' correctly cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
		enum ttm_caching_state cstate, unsigned count)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
	int gfp_flags = 0;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= __GFP_HIGHMEM;

		for (r = 0; r < count; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {

				printk(KERN_ERR "[ttm] unable to allocate page.");
				return -ENOMEM;
			}

			list_add(&p->lru, pages);
		}
		return 0;
	}


	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, pages, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (count > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
					"for large request.");
			ttm_put_pages(pages, 0, flags, cstate);
			return r;
		}
	}


	return 0;
}
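
/*
 * Driver-side sketch (illustrative only; names such as 'npages' are
 * placeholders, not taken from this file): populating a backend with
 * write-combined, zeroed pages and handing them back when done.
 *
 *	struct list_head pages;
 *	int flags = TTM_PAGE_FLAG_ZERO_ALLOC;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&pages);
 *	ret = ttm_get_pages(&pages, flags, tt_wc, npages);
 *	if (ret)
 *		return ret;
 *	... use the pages ...
 *	ttm_put_pages(&pages, npages, flags, tt_wc);
 */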

/* Put all pages in the pages list into the correct pool to wait for reuse. */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
		enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p, *tmp;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */

		list_for_each_entry_safe(p, tmp, pages, lru) {
			__free_page(p);
		}
		/* Make the pages list empty */
		INIT_LIST_HEAD(pages);
		return;
	}
	if (page_count == 0) {
		list_for_each_entry_safe(p, tmp, pages, lru) {
			++page_count;
		}
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	list_splice_init(pages, &pool->list);
	pool->npages += page_count;
	/* Check that we don't go over the pool limit */
	page_count = 0;
	if (pool->npages > _manager.options.max_size) {
		page_count = pool->npages - _manager.options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (page_count < NUM_PAGES_TO_ALLOC)
			page_count = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (page_count)
		ttm_page_pool_free(pool, page_count);
}
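
/*
 * Note (illustrative): passing page_count == 0 asks ttm_put_pages() to count
 * the list itself, which is handy on error paths where the caller no longer
 * knows how many pages made it onto the list:
 *
 *	ttm_put_pages(pages, 0, flags, cstate);	(as the failure path in
 *						 ttm_get_pages() above does)
 */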

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(unsigned max_pages)
{
	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
		return 0;

	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");

	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
			"wc dma");

	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
			"uc dma");

	_manager.options.max_size = max_pages;
	_manager.options.small = SMALL_ALLOCATION;
	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;

	ttm_pool_mm_shrink_init(&_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
		return;

	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(&_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
}
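
/*
 * Lifecycle sketch (illustrative; the calling driver and the value of
 * 'max_pages' are assumptions, not taken from this file): a driver brings
 * the allocator up once during its global memory init and tears it down on
 * exit. Because page_alloc_inited is reference counted, nested init/fini
 * pairs from several drivers are safe.
 *
 *	r = ttm_page_alloc_init(max_pages);
 *	if (r)
 *		goto out_err;
 *	...
 *	ttm_page_alloc_fini();
 */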

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (atomic_read(&_manager.page_alloc_inited) == 0) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager.pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
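
/*
 * Hook-up sketch (illustrative; the file name and debugfs root used here are
 * assumptions, not defined in this file): a driver can expose the table
 * above through a seq_file single_open() wrapper.
 *
 *	static int ttm_page_alloc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, ttm_page_alloc_debugfs, NULL);
 *	}
 *
 *	static const struct file_operations ttm_page_alloc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = ttm_page_alloc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 *
 *	debugfs_create_file("ttm_page_pool", 0444, debugfs_root, NULL,
 *			    &ttm_page_alloc_fops);
 */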