/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

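/*
 * Map a buffer into the kernel's virtual address space.  The pages
 * backing the buffer's sg_table are gathered into a flat array and
 * vmap()ed with a cached or write-combined pgprot, depending on
 * ION_FLAG_CACHED.
 */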
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

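/* Tear down a kernel mapping created by ion_heap_map_kernel(). */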
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

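/*
 * Map a buffer into a userspace vma.  Each scatterlist entry is mapped
 * with remap_pfn_range(); vma->vm_pgoff selects the starting page within
 * the buffer, and mapping stops once the vma has been filled.
 */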
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

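/* Zero a batch of pages by temporarily mapping them with vm_map_ram(). */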
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

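/*
 * Zero every page in a scatterlist.  Pages are cleared in batches of 32
 * so that only a small, bounded amount of vmalloc space is in use at any
 * one time.
 */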
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

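/*
 * Zero the contents of a buffer, using the same cacheability attributes
 * the buffer was allocated with.
 */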
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

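/* Zero a single physically contiguous run of pages. */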
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

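/*
 * Queue a buffer for deferred freeing and wake the free thread.  The
 * buffer's size is accounted in free_list_size until it is destroyed.
 */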
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

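/* Return the number of bytes currently waiting on the freelist. */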
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

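/*
 * Synchronously free buffers from the freelist until at least size bytes
 * have been drained; a size of 0 drains the whole list.  free_lock is
 * dropped around each ion_buffer_destroy() call, since destroying a
 * buffer may sleep.
 */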
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

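/*
 * Body of the per-heap deferred-free kthread: sleep until buffers appear
 * on the freelist, then destroy them one at a time outside the lock.
 */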
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

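/*
 * Set up the freelist and spawn the deferred-free thread for a heap.
 * The thread runs at SCHED_IDLE priority so that freeing never competes
 * with foreground work for CPU time.
 */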
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	spin_lock_init(&heap->free_lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	/* Only adjust the scheduler policy once kthread_run() succeeded;
	 * heap->task is an ERR_PTR on failure and must not be dereferenced.
	 */
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

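/*
 * Instantiate a heap from platform data, dispatching on the heap type.
 * Returns ERR_PTR(-EINVAL) for unknown types or when the type-specific
 * constructor fails.
 */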
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

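/* Destroy a heap created by ion_heap_create(); NULL is a no-op. */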
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}