/*
 * drivers/gpu/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"

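/*
 * Map every page backing the buffer's sg_table into one contiguous kernel
 * virtual range with vmap().  The mapping is cached only when the buffer
 * was allocated with ION_FLAG_CACHED; otherwise it is writecombined.
 */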
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/* flatten the sg_table into a plain page array for vmap() */
	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

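/*
 * Map the buffer into a userspace VMA, walking the sg_table and calling
 * remap_pfn_range() for each chunk.  The mmap offset (vma->vm_pgoff, in
 * pages) determines where in the buffer the mapping starts.
 */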
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		/* skip entries that lie entirely before the mmap offset */
		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

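/*
 * Upper bound on how many vmap() attempts ion_heap_pages_zero() makes per
 * chunk, halving the chunk size after each failure before giving up with
 * -ENOMEM.
 */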
#define MAX_VMAP_RETRIES 10

/**
 * An optimized page-zeroing function. vmaps arrays of pages in large
 * chunks to minimize the number of memsets and vmaps/vunmaps.
 *
 * Note that the `pages' array should be composed of all 4K pages.
 */
int ion_heap_pages_zero(struct page **pages, int num_pages)
{
	int i, j, k, npages_to_vmap;
	void *ptr = NULL;
	/*
	 * It's cheaper just to use writecombine memory and skip the
	 * cache vs. using cached memory and trying to flush it afterwards
	 */
	pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);

	/*
	 * As an optimization, we manually zero out all of the pages
	 * in one fell swoop here. To safeguard against insufficient
	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
	 * starting with a conservative estimate of 1/8 of the total
	 * number of vmalloc pages available.
	 */
	npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
	for (i = 0; i < num_pages; i += npages_to_vmap) {
		npages_to_vmap = min(npages_to_vmap, num_pages - i);
		for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
		     ++j) {
			ptr = vmap(&pages[i], npages_to_vmap,
				   VM_IOREMAP, pgprot);
			if (ptr)
				break;
			else
				npages_to_vmap >>= 1;
		}
		if (!ptr)
			return -ENOMEM;

		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
		/*
		 * Invalidate the cache to pick up the zeroing
		 */
		for (k = 0; k < npages_to_vmap; k++) {
			void *p = kmap_atomic(pages[i + k]);
			phys_addr_t phys = page_to_phys(pages[i + k]);

			dmac_inv_range(p, p + PAGE_SIZE);
			outer_inv_range(phys, phys + PAGE_SIZE);
			kunmap_atomic(p);
		}
		vunmap(ptr);
	}

	return 0;
}

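/*
 * Allocate the temporary struct page * table used by the zeroing helpers.
 * Small tables come from kmalloc(); large ones fall back to vmalloc(), and
 * pages_mem->free_fn records which allocator must undo the allocation.
 */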
static int ion_heap_alloc_pages_mem(int page_tbl_size,
				    struct pages_mem *pages_mem)
{
	struct page **pages;

	pages_mem->free_fn = kfree;
	if (page_tbl_size > SZ_8K) {
		/*
		 * Fall back to vmalloc() if the kmalloc() attempt fails,
		 * to balance performance against availability.
		 */
		pages = kmalloc(page_tbl_size,
				__GFP_COMP | __GFP_NORETRY |
				__GFP_NO_KSWAPD | __GFP_NOWARN);
		if (!pages) {
			pages = vmalloc(page_tbl_size);
			pages_mem->free_fn = vfree;
		}
	} else {
		pages = kmalloc(page_tbl_size, GFP_KERNEL);
	}

	if (!pages)
		return -ENOMEM;

	pages_mem->pages = pages;
	return 0;
}

static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
{
	pages_mem->free_fn(pages_mem->pages);
}

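/*
 * Zero a single high-order allocation by expanding it into an array of
 * 0-order pages and handing that array to ion_heap_pages_zero().
 */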
int ion_heap_high_order_page_zero(struct page *page, int order)
{
	int i, ret;
	struct pages_mem pages_mem;
	int npages = 1 << order;
	int page_tbl_size = sizeof(struct page *) * npages;

	if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
		return -ENOMEM;

	for (i = 0; i < npages; ++i)
		pages_mem.pages[i] = page + i;

	ret = ion_heap_pages_zero(pages_mem.pages, npages);
	ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

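/*
 * Zero an entire ion_buffer by collecting every page in its sg_table into
 * one array and zeroing them in bulk with ion_heap_pages_zero().
 */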
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i, j, ret = 0, npages = 0, page_tbl_size = 0;
	struct pages_mem pages_mem;

	for_each_sg(table->sgl, sg, table->nents, i) {
		unsigned long len = sg_dma_len(sg);
		int nrpages = len >> PAGE_SHIFT;

		page_tbl_size += sizeof(struct page *) * nrpages;
	}

	if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg_dma_len(sg);

		for (j = 0; j < len / PAGE_SIZE; j++)
			pages_mem.pages[npages++] = page + j;
	}

	ret = ion_heap_pages_zero(pages_mem.pages, npages);
	ion_heap_free_pages_mem(&pages_mem);
	return ret;
}

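/*
 * Free the pages backing a buffer.  Buffers whose user mappings are
 * faulted in page-by-page have had their high-order pages split, so each
 * page must be freed individually rather than as one high-order block.
 */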
void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
			unsigned int order)
{
	int i;

	if (!ion_buffer_fault_user_mappings(buffer)) {
		__free_pages(page, order);
		return;
	}
	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
}

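/*
 * Deferred free support: freed buffers are queued on heap->free_list and
 * destroyed later, either by the heap's kthread (ion_heap_deferred_free)
 * or by an explicit drain.
 */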
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

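/*
 * Drain up to 'size' bytes of buffers from the freelist (0 drains
 * everything).  When skip_pools is set the buffers are flagged so they
 * bypass any page pools on their way to being destroyed.
 */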
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

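/*
 * Body of the per-heap deferred-free kthread: sleep until the freelist is
 * non-empty, then destroy queued buffers one at a time.
 */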
int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

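/*
 * Set up the freelist, its lock and waitqueue, and start the deferred-free
 * kthread at SCHED_IDLE priority.
 */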
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

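/*
 * Instantiate a heap from its platform description.  The type-specific
 * constructors (and the matching destructors used by ion_heap_destroy())
 * live in the individual heap implementations.
 */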
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %pa size %u\n",
		       __func__, heap_data->name, heap_data->type,
		       &heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	heap->priv = heap_data->priv;
	return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}