/*
 * drivers/gpu/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/shrinker.h>
#include "ion_priv.h"

struct ion_page_pool_item {
        struct page *page;
        struct list_head list;
};

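/*
 * Grab a fresh high-order page from the buddy allocator and clean the
 * CPU caches over it, so the buffer starts out coherent from a
 * device's point of view.
 */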
static struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
        struct page *page = alloc_pages(pool->gfp_mask, pool->order);
        struct scatterlist sg;

        if (!page)
                return NULL;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
        dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);

        return page;
}

static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
{
        __free_pages(page, pool->order);
}

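/*
 * Track @page on the pool's free list. Fails only if the small
 * tracking item itself cannot be allocated. Callers must hold
 * pool->mutex.
 */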
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
        struct ion_page_pool_item *item;

        item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->page = page;
        list_add_tail(&item->list, &pool->items);
        pool->count++;
        return 0;
}

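/*
 * Detach and return the oldest pooled page. The pool must be non-empty
 * and pool->mutex must be held.
 */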
static struct page *ion_page_pool_remove(struct ion_page_pool *pool)
{
        struct ion_page_pool_item *item;
        struct page *page;

        BUG_ON(!pool->count);
        BUG_ON(list_empty(&pool->items));

        item = list_first_entry(&pool->items, struct ion_page_pool_item, list);
        list_del(&item->list);
        page = item->page;
        kfree(item);
        pool->count--;
        return page;
}

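/*
 * Hand out one order-sized page, preferring a recycled page from the
 * pool and falling back to a fresh allocation when the pool is empty.
 */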
void *ion_page_pool_alloc(struct ion_page_pool *pool)
{
        struct page *page = NULL;

        BUG_ON(!pool);

        mutex_lock(&pool->mutex);
        if (pool->count)
                page = ion_page_pool_remove(pool);
        else
                page = ion_page_pool_alloc_pages(pool);
        mutex_unlock(&pool->mutex);

        return page;
}

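/*
 * Return @page to the pool for reuse. If the pool cannot track it
 * (tracking-item allocation failed), give it straight back to the page
 * allocator instead.
 */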
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
        int ret;

        mutex_lock(&pool->mutex);
        ret = ion_page_pool_add(pool, page);
        if (ret)
                ion_page_pool_free_pages(pool, page);
        mutex_unlock(&pool->mutex);
}

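/*
 * Shrinker callback (the pre-3.12 single-callback API). A call with
 * sc->nr_to_scan == 0 is only a size query; otherwise release up to
 * nr_to_scan pool entries, skipping highmem pages when the reclaim
 * context cannot take them. Either way, return the pool's remaining
 * size in order-0 pages.
 */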
static int ion_page_pool_shrink(struct shrinker *shrinker,
                                struct shrink_control *sc)
{
        struct ion_page_pool *pool = container_of(shrinker,
                                                  struct ion_page_pool,
                                                  shrinker);
        int nr_freed = 0;
        int i;

        if (sc->nr_to_scan == 0)
                return pool->count * (1 << pool->order);

        mutex_lock(&pool->mutex);
        for (i = 0; i < sc->nr_to_scan && pool->count; i++) {
                struct ion_page_pool_item *item;
                struct page *page;

                item = list_first_entry(&pool->items,
                                        struct ion_page_pool_item, list);
                page = item->page;
                if (PageHighMem(page) && !(sc->gfp_mask & __GFP_HIGHMEM)) {
                        list_move_tail(&item->list, &pool->items);
                        continue;
                }
                BUG_ON(page != ion_page_pool_remove(pool));
                ion_page_pool_free_pages(pool, page);
                nr_freed += (1 << pool->order);
        }
        pr_info("%s: shrunk page_pool of order %d by %d pages\n", __func__,
                pool->order, nr_freed);
        mutex_unlock(&pool->mutex);

        return pool->count * (1 << pool->order);
}

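/*
 * Create a pool for pages of the given @order, allocated with
 * @gfp_mask, and register a shrinker so the pool is drained under
 * memory pressure. seeks is set well above DEFAULT_SEEKS, which tells
 * the VM these pages are comparatively expensive to recreate and
 * biases reclaim away from the pool.
 */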
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
        struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
                                             GFP_KERNEL);
        if (!pool)
                return NULL;
        pool->count = 0;
        INIT_LIST_HEAD(&pool->items);
        pool->shrinker.shrink = ion_page_pool_shrink;
        pool->shrinker.seeks = DEFAULT_SEEKS * 16;
        pool->shrinker.batch = 0;
        register_shrinker(&pool->shrinker);
        pool->gfp_mask = gfp_mask;
        pool->order = order;
        mutex_init(&pool->mutex);

        return pool;
}

void ion_page_pool_destroy(struct ion_page_pool *pool)
{
        unregister_shrinker(&pool->shrinker);
        /* Release anything still cached so the pooled pages don't leak. */
        while (pool->count)
                ion_page_pool_free_pages(pool, ion_page_pool_remove(pool));
        kfree(pool);
}
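
/*
 * Usage sketch (not part of the original file): roughly how a heap
 * might drive this API. The function below and the order value are
 * hypothetical and shown only for the call sequence; real clients such
 * as the ion system heap typically keep one pool per allocation order.
 * Guarded out so it cannot affect the driver build.
 */
#ifdef ION_PAGE_POOL_USAGE_EXAMPLE
static void ion_page_pool_example(void)
{
        /* Pool of zeroed, highmem-capable order-2 (16K with 4K pages)
         * allocations. */
        struct ion_page_pool *pool =
                ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 2);
        struct page *page;

        if (!pool)
                return;

        /* First alloc falls through to alloc_pages(): pool is empty. */
        page = ion_page_pool_alloc(pool);

        /* "Freeing" parks the page in the pool for the next caller. */
        if (page)
                ion_page_pool_free(pool, page);

        /* Teardown unregisters the shrinker and drops cached pages. */
        ion_page_pool_destroy(pool);
}
#endif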