/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Name of the pool, used for debugfs output.
 * @nfrees: Stats counter of pages freed from this pool.
 * @nrefills: Stats counter of times this pool has been refilled.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read only object for the pool code so it doesn't need locking.
 *
 * @kobj: kobject used to expose the pool limits through sysfs.
 * @mm_shrink: Shrinker callback registered with the mm subsystem so the
 * pools can be drained when the system is under memory pressure.
 * @options: Tunable limits for the pools (see struct ttm_pool_opts).
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

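	/* Convert the page count back to kilobytes for the sysfs read. */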
	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

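	/* Bit 0 of the index selects uc (1) vs. wc (0) and bit 1 selects the
	 * dma32 variant, matching the pools[] layout in ttm_pool_manager. */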
	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free every page
 * in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

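	/* Cap each batch at NUM_PAGES_TO_ALLOC so the temporary pointer array
	 * below never exceeds one page. */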
	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR TTM_PFX
		       "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* more pages still to free, so restart the scan */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
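	/* start_pool is shared across calls so that successive shrink
	 * requests rotate through the pools instead of always draining
	 * pools[0] first. */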
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to uc!\n",
			       cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to wc!\n",
			       cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. If any pages have
 * already changed their caching state, put them back into the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
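	/* Caching changes are expensive, so they are done in batches of at
	 * most max_cpages pages (one page worth of struct page pointers). */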

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR TTM_PFX
		       "Unable to allocate table for new pages.");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR TTM_PFX
			       "Failed to fill pool (%p).\n", pool);
			/* If we have any pages left, put them back into the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to keep the search
	 * short. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					printk(KERN_ERR TTM_PFX
					       "Erroneous page count. "
					       "Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
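	/* Trim the pool outside the lock: ttm_page_pool_free retakes it and
	 * may sleep while allocating and changing caching attributes. */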
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}
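	/* count now holds how many pages came from the pool; npages is how
	 * many still have to be allocated. */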

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

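	/* Allocate and account one page at a time so that a failure can be
	 * unwound cleanly through ttm_pool_unpopulate(). */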
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);