/*
 * drivers/gpu/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/list.h>
20#include <linux/slab.h>
21#include <linux/shrinker.h>
22#include "ion_priv.h"
23
/* One cached page (of pool->order) on a pool free list. */
struct ion_page_pool_item {
	struct page *page;	/* the cached compound page */
	struct list_head list;	/* link on pool->high_items or pool->low_items */
};
28
29static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
30{
31 struct page *page = alloc_pages(pool->gfp_mask, pool->order);
32 struct scatterlist sg;
33
34 if (!page)
35 return NULL;
36
37 sg_init_table(&sg, 1);
38 sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
39 dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
40
41 return page;
42}
43
/* Return a page block of pool->order to the page allocator. */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
49
50static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
51{
52 struct ion_page_pool_item *item;
53
54 item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
55 if (!item)
56 return -ENOMEM;
57 item->page = page;
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -070058 if (PageHighMem(page)) {
59 list_add_tail(&item->list, &pool->high_items);
60 pool->high_count++;
61 } else {
62 list_add_tail(&item->list, &pool->low_items);
63 pool->low_count++;
64 }
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -070065 return 0;
66}
67
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -070068static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -070069{
70 struct ion_page_pool_item *item;
71 struct page *page;
72
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -070073 if (high) {
74 BUG_ON(!pool->high_count);
75 item = list_first_entry(&pool->high_items,
76 struct ion_page_pool_item, list);
77 pool->high_count--;
78 } else {
79 BUG_ON(!pool->low_count);
80 item = list_first_entry(&pool->low_items,
81 struct ion_page_pool_item, list);
82 pool->low_count--;
83 }
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -070084
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -070085 list_del(&item->list);
86 page = item->page;
87 kfree(item);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -070088 return page;
89}
90
91void *ion_page_pool_alloc(struct ion_page_pool *pool)
92{
93 struct page *page = NULL;
94
95 BUG_ON(!pool);
96
97 mutex_lock(&pool->mutex);
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -070098 if (pool->high_count)
99 page = ion_page_pool_remove(pool, true);
100 else if (pool->low_count)
101 page = ion_page_pool_remove(pool, false);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700102 mutex_unlock(&pool->mutex);
103
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700104 if (!page)
105 page = ion_page_pool_alloc_pages(pool);
106
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700107 return page;
108}
109
110void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
111{
112 int ret;
113
114 mutex_lock(&pool->mutex);
115 ret = ion_page_pool_add(pool, page);
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700116 mutex_unlock(&pool->mutex);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700117 if (ret)
118 ion_page_pool_free_pages(pool, page);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700119}
120
121static int ion_page_pool_shrink(struct shrinker *shrinker,
122 struct shrink_control *sc)
123{
124 struct ion_page_pool *pool = container_of(shrinker,
125 struct ion_page_pool,
126 shrinker);
127 int nr_freed = 0;
128 int i;
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700129 bool high;
130
131 if (sc->gfp_mask & __GFP_HIGHMEM)
132 high = true;
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700133
134 if (sc->nr_to_scan == 0)
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700135 return high ? (pool->high_count + pool->low_count) *
136 (1 << pool->order) :
137 pool->low_count * (1 << pool->order);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700138
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700139 for (i = 0; i < sc->nr_to_scan; i++) {
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700140 struct page *page;
141
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700142 mutex_lock(&pool->mutex);
143 if (high && pool->high_count) {
144 page = ion_page_pool_remove(pool, true);
145 } else if (pool->low_count) {
146 page = ion_page_pool_remove(pool, false);
147 } else {
148 mutex_unlock(&pool->mutex);
149 break;
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700150 }
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700151 mutex_unlock(&pool->mutex);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700152 ion_page_pool_free_pages(pool, page);
153 nr_freed += (1 << pool->order);
154 }
155 pr_info("%s: shrunk page_pool of order %d by %d pages\n", __func__,
156 pool->order, nr_freed);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700157
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700158 return high ? (pool->high_count + pool->low_count) *
159 (1 << pool->order) :
160 pool->low_count * (1 << pool->order);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700161}
162
163struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
164{
165 struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
166 GFP_KERNEL);
167 if (!pool)
168 return NULL;
Rebecca Schultz Zavin9fad2fe2012-10-08 23:01:23 -0700169 pool->high_count = 0;
170 pool->low_count = 0;
171 INIT_LIST_HEAD(&pool->low_items);
172 INIT_LIST_HEAD(&pool->high_items);
Rebecca Schultz Zavin050372e2012-06-07 16:36:44 -0700173 pool->shrinker.shrink = ion_page_pool_shrink;
174 pool->shrinker.seeks = DEFAULT_SEEKS * 16;
175 pool->shrinker.batch = 0;
176 register_shrinker(&pool->shrinker);
177 pool->gfp_mask = gfp_mask;
178 pool->order = order;
179 mutex_init(&pool->mutex);
180
181 return pool;
182}
183
/*
 * Tear down a pool created by ion_page_pool_create().  The shrinker is
 * unregistered before the pool is freed so the callback cannot run on
 * freed memory.
 * NOTE(review): cached pages still on the free lists are not released
 * here — presumably callers drain the pool first; confirm at call sites.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	unregister_shrinker(&pool->shrinker);
	kfree(pool);
}
189