/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

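/*
 * Map a buffer's scatter-gather pages into one contiguous kernel virtual
 * range with vmap().  The mapping is write-combined unless the buffer was
 * allocated with ION_FLAG_CACHED.
 */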
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

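/* Tear down the kernel mapping created by ion_heap_map_kernel(). */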
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

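/*
 * Map a buffer into a userspace VMA by walking its scatterlist and calling
 * remap_pfn_range() on each (possibly partial) entry; vma->vm_pgoff is
 * treated as a page offset into the buffer.
 */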
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

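/*
 * Zero a batch of pages through a temporary vm_map_ram() mapping so the
 * clearing is done with the requested pgprot (e.g. write-combine).
 */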
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

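/*
 * Zero every page backing a scatterlist, batching up to 32 pages per
 * temporary mapping to limit vm_map_ram() overhead.
 */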
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

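/*
 * Zero an entire buffer, using cached or write-combined protection to
 * match how the buffer itself is mapped.
 */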
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

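/*
 * Zero a physically contiguous run of pages by wrapping it in a
 * single-entry scatterlist and reusing ion_heap_sglist_zero().
 */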
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

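/*
 * Queue a buffer on the heap's deferred-free list and wake the
 * ion_heap_deferred_free() thread that drains it.
 */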
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

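/*
 * Free up to @size bytes worth of buffers from the free list (everything
 * when @size is 0).  The lock is dropped around each ion_buffer_destroy()
 * call, which may sleep.  With @skip_pools set, buffers are tagged
 * ION_PRIV_FLAG_SHRINKER_FREE so the heap's free path can bypass its page
 * pools and hand the memory straight back to the system.
 */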
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

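/*
 * ion_heap_freelist_drain() frees deferred buffers normally, while
 * ion_heap_freelist_shrink() marks them ION_PRIV_FLAG_SHRINKER_FREE so
 * page pools are skipped; the latter is what the shrinker path uses.
 */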
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

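/*
 * Per-heap kthread: sleeps until ion_heap_freelist_add() queues work, then
 * destroys buffers one at a time, dropping free_lock around each
 * ion_buffer_destroy() call.
 */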
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

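/*
 * Set up the free list, waitqueue and low-priority (SCHED_IDLE) kthread
 * used for deferred freeing.  A heap opts in to deferred freeing roughly
 * like this (a sketch; the actual registration path lives in the ion
 * core, not in this file):
 *
 *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	ion_heap_init_deferred_free(heap);
 */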
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

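/*
 * Shrinker "count" callback: report the pages sitting on the deferred-free
 * list plus whatever the heap's own ->shrink() op says it could reclaim
 * (the nr_to_scan argument of 0 asks it only to count).
 */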
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
	return total;
}

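/*
 * Shrinker "scan" callback: reclaim from the deferred-free list first
 * (skipping page pools), then ask the heap's ->shrink() op to release
 * anything further.  Returns the number of pages freed.
 */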
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * shrink the free list first, no point in zeroing the memory if we're
	 * just going to reclaim it. Also, skip any possible page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
	return freed;
}

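/*
 * Register the heap with the VM shrinker framework so that system memory
 * pressure can push buffers out of its free list and, via ->shrink(), out
 * of any heap-private pools.
 */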
void ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;
	register_shrinker(&heap->shrinker);
}

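/*
 * Instantiate a heap of the type described by the platform data and tag it
 * with the platform-provided name and id.  Unknown types and constructor
 * failures both come back as ERR_PTR(-EINVAL).
 */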
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}
EXPORT_SYMBOL(ion_heap_create);

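/* Tear down a heap created by ion_heap_create(); a NULL heap is a no-op. */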
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
EXPORT_SYMBOL(ion_heap_destroy);