blob: f3f627dc740a5cc7380fe3c4b6ab76b86f96e6f4 [file] [log] [blame]
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001/*
2 * drivers/gpu/ion/ion_system_heap.c
3 *
4 * Copyright (C) 2011 Google, Inc.
Mitchel Humpherysaf3b5222013-01-15 15:38:52 -08005 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07006 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070018#include <asm/page.h>
19#include <linux/dma-mapping.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070020#include <linux/err.h>
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070021#include <linux/highmem.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070022#include <linux/ion.h>
23#include <linux/mm.h>
24#include <linux/scatterlist.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
Olav Haugan3d4fe1a2012-01-13 11:42:15 -080027#include <linux/seq_file.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070028#include "ion_priv.h"
Laura Abbottabcb6f72011-10-04 16:26:49 -070029#include <mach/memory.h>
Olav Haugan85c95402012-05-30 17:32:37 -070030#include <asm/cacheflush.h>
Mitchel Humpherysaf2e5c52012-09-06 12:16:36 -070031#include <linux/msm_ion.h>
Neeti Desai3f3c2822013-03-08 17:29:53 -080032#include <linux/dma-mapping.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070033
/*
 * Running byte totals for the two heaps in this file; read back by the
 * *_print_debug() handlers for debugfs accounting.
 */
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

/*
 * Bookkeeping node for one physically contiguous chunk returned by
 * alloc_largest_available(); chained on a local list while an
 * allocation is being assembled, then freed once the sg_table is built.
 */
struct page_info {
	struct page *page;	/* first page of the 2^order chunk */
	unsigned long order;	/* allocation order of @page */
	struct list_head list;	/* entry in the allocator's local list */
};

Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070043static struct page_info *alloc_largest_available(unsigned long size,
44 bool split_pages)
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070045{
46 static unsigned int orders[] = {8, 4, 0};
47 struct page *page;
48 struct page_info *info;
49 int i;
50
51 for (i = 0; i < ARRAY_SIZE(orders); i++) {
52 if (size < (1 << orders[i]) * PAGE_SIZE)
53 continue;
Dima Zavindcd71bc2012-07-27 15:29:55 -070054 page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070055 __GFP_NOWARN | __GFP_NORETRY, orders[i]);
56 if (!page)
57 continue;
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070058 if (split_pages)
59 split_page(page, orders[i]);
Rebecca Schultz Zavin6a93a292012-08-21 21:35:20 -070060 info = kmalloc(sizeof(struct page_info *), GFP_KERNEL);
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070061 info->page = page;
62 info->order = orders[i];
63 return info;
64 }
65 return NULL;
66}
67
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070068static int ion_system_heap_allocate(struct ion_heap *heap,
69 struct ion_buffer *buffer,
70 unsigned long size, unsigned long align,
71 unsigned long flags)
72{
Laura Abbottb14ed962012-01-30 14:18:08 -080073 struct sg_table *table;
74 struct scatterlist *sg;
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070075 int ret;
76 struct list_head pages;
77 struct page_info *info, *tmp_info;
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070078 int i = 0;
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070079 long size_remaining = PAGE_ALIGN(size);
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070080 bool split_pages = ion_buffer_fault_user_mappings(buffer);
81
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070082
83 INIT_LIST_HEAD(&pages);
84 while (size_remaining > 0) {
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070085 info = alloc_largest_available(size_remaining, split_pages);
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070086 if (!info)
87 goto err;
88 list_add_tail(&info->list, &pages);
89 size_remaining -= (1 << info->order) * PAGE_SIZE;
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070090 i++;
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070091 }
Laura Abbott68c80642011-10-21 17:32:27 -070092
Laura Abbottb14ed962012-01-30 14:18:08 -080093 table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
94 if (!table)
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -070095 goto err;
96
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -070097 if (split_pages)
98 ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
99 GFP_KERNEL);
100 else
101 ret = sg_alloc_table(table, i, GFP_KERNEL);
102
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700103 if (ret)
104 goto err1;
105
106 sg = table->sgl;
107 list_for_each_entry_safe(info, tmp_info, &pages, list) {
108 struct page *page = info->page;
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -0700109
110 if (split_pages) {
111 for (i = 0; i < (1 << info->order); i++) {
112 sg_set_page(sg, page + i, PAGE_SIZE, 0);
113 sg = sg_next(sg);
114 }
115 } else {
116 sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
117 0);
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700118 sg = sg_next(sg);
119 }
120 list_del(&info->list);
Rebecca Schultz Zavin6a93a292012-08-21 21:35:20 -0700121 kfree(info);
Laura Abbottb14ed962012-01-30 14:18:08 -0800122 }
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700123
124 dma_sync_sg_for_device(NULL, table->sgl, table->nents,
125 DMA_BIDIRECTIONAL);
126
Laura Abbottb14ed962012-01-30 14:18:08 -0800127 buffer->priv_virt = table;
Laura Abbott68c80642011-10-21 17:32:27 -0700128 atomic_add(size, &system_heap_allocated);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700129 return 0;
Laura Abbottb14ed962012-01-30 14:18:08 -0800130err1:
Laura Abbottb14ed962012-01-30 14:18:08 -0800131 kfree(table);
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700132err:
133 list_for_each_entry(info, &pages, list) {
Rebecca Schultz Zavinf858ba42012-09-21 11:46:06 -0700134 if (split_pages)
135 for (i = 0; i < (1 << info->order); i++)
136 __free_page(info->page + i);
137 else
138 __free_pages(info->page, info->order);
139
Rebecca Schultz Zavin6a93a292012-08-21 21:35:20 -0700140 kfree(info);
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700141 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800142 return -ENOMEM;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700143}
144
/*
 * Free a system-heap buffer allocated by ion_system_heap_allocate().
 *
 * Walks the sg_table stored in buffer->priv_virt and frees each chunk.
 * get_order(sg_dma_len(sg)) recovers the allocation order from the
 * entry length (PAGE_SIZE entries from the split_pages path yield
 * order 0).  buffer->sg_table is the table the ION core obtained via
 * map_dma -- here that is the same table as priv_virt (map_dma returns
 * priv_virt), so freeing it also releases the priv_virt table.
 * NOTE(review): this aliasing is assumed from map_dma below -- confirm
 * against the ION core before restructuring.
 */
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	atomic_sub(buffer->size, &system_heap_allocated);
}
158
Laura Abbottb14ed962012-01-30 14:18:08 -0800159struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
160 struct ion_buffer *buffer)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700161{
Laura Abbottb14ed962012-01-30 14:18:08 -0800162 return buffer->priv_virt;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700163}
164
/*
 * Nothing to undo: map_dma only returned the table already owned by the
 * buffer, so unmap is a no-op.
 */
void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}
170
171void *ion_system_heap_map_kernel(struct ion_heap *heap,
Laura Abbottb14ed962012-01-30 14:18:08 -0800172 struct ion_buffer *buffer)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700173{
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700174 struct scatterlist *sg;
175 int i, j;
176 void *vaddr;
177 pgprot_t pgprot;
178 struct sg_table *table = buffer->priv_virt;
179 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
180 struct page **pages = kzalloc(sizeof(struct page *) * npages,
181 GFP_KERNEL);
182 struct page **tmp = pages;
Laura Abbottb14ed962012-01-30 14:18:08 -0800183
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700184 if (buffer->flags & ION_FLAG_CACHED)
185 pgprot = PAGE_KERNEL;
186 else
187 pgprot = pgprot_writecombine(PAGE_KERNEL);
Laura Abbottb14ed962012-01-30 14:18:08 -0800188
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700189 for_each_sg(table->sgl, sg, table->nents, i) {
190 int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
191 struct page *page = sg_page(sg);
192 BUG_ON(i >= npages);
193 for (j = 0; j < npages_this_entry; j++) {
194 *(tmp++) = page++;
195 }
Laura Abbott894fd582011-08-19 13:33:56 -0700196 }
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700197 vaddr = vmap(pages, npages, VM_MAP, pgprot);
198 kfree(pages);
199
200 return vaddr;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700201}
202
203void ion_system_heap_unmap_kernel(struct ion_heap *heap,
204 struct ion_buffer *buffer)
205{
Laura Abbottb14ed962012-01-30 14:18:08 -0800206 vunmap(buffer->vaddr);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700207}
208
209int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
Laura Abbottb14ed962012-01-30 14:18:08 -0800210 struct vm_area_struct *vma)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700211{
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700212 struct sg_table *table = buffer->priv_virt;
213 unsigned long addr = vma->vm_start;
214 unsigned long offset = vma->vm_pgoff;
215 struct scatterlist *sg;
216 int i;
217
Laura Abbottb14ed962012-01-30 14:18:08 -0800218 if (!ION_IS_CACHED(buffer->flags)) {
Laura Abbott894fd582011-08-19 13:33:56 -0700219 pr_err("%s: cannot map system heap uncached\n", __func__);
220 return -EINVAL;
221 }
Rebecca Schultz Zavinb831c8c2012-06-14 13:30:01 -0700222
223 for_each_sg(table->sgl, sg, table->nents, i) {
224 if (offset) {
225 offset--;
226 continue;
227 }
228 remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
229 sg_dma_len(sg), vma->vm_page_prot);
230 addr += sg_dma_len(sg);
231 }
232 return 0;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700233}
234
Olav Haugan0671b9a2012-05-25 11:58:56 -0700235static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
236 const struct rb_root *unused)
Laura Abbott68c80642011-10-21 17:32:27 -0700237{
Olav Haugan3d4fe1a2012-01-13 11:42:15 -0800238 seq_printf(s, "total bytes currently allocated: %lx\n",
239 (unsigned long) atomic_read(&system_heap_allocated));
240
241 return 0;
Laura Abbott68c80642011-10-21 17:32:27 -0700242}
243
/* Heap operations for the page-based (non-contiguous) system heap. */
static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.print_debug = ion_system_print_debug,
};
254
Olav Haugan85c95402012-05-30 17:32:37 -0700255struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700256{
257 struct ion_heap *heap;
258
259 heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
260 if (!heap)
261 return ERR_PTR(-ENOMEM);
262 heap->ops = &vmalloc_ops;
263 heap->type = ION_HEAP_TYPE_SYSTEM;
264 return heap;
265}
266
/* Release a heap created by ion_system_heap_create(). */
void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
271
272static int ion_system_contig_heap_allocate(struct ion_heap *heap,
273 struct ion_buffer *buffer,
274 unsigned long len,
275 unsigned long align,
276 unsigned long flags)
277{
278 buffer->priv_virt = kzalloc(len, GFP_KERNEL);
279 if (!buffer->priv_virt)
280 return -ENOMEM;
Laura Abbott68c80642011-10-21 17:32:27 -0700281 atomic_add(len, &system_contig_heap_allocated);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700282 return 0;
283}
284
285void ion_system_contig_heap_free(struct ion_buffer *buffer)
286{
287 kfree(buffer->priv_virt);
Laura Abbott68c80642011-10-21 17:32:27 -0700288 atomic_sub(buffer->size, &system_contig_heap_allocated);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700289}
290
291static int ion_system_contig_heap_phys(struct ion_heap *heap,
292 struct ion_buffer *buffer,
293 ion_phys_addr_t *addr, size_t *len)
294{
295 *addr = virt_to_phys(buffer->priv_virt);
296 *len = buffer->size;
297 return 0;
298}
299
Laura Abbottb14ed962012-01-30 14:18:08 -0800300struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -0700301 struct ion_buffer *buffer)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700302{
Laura Abbottb14ed962012-01-30 14:18:08 -0800303 struct sg_table *table;
304 int ret;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700305
Laura Abbottb14ed962012-01-30 14:18:08 -0800306 table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
307 if (!table)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700308 return ERR_PTR(-ENOMEM);
Laura Abbottb14ed962012-01-30 14:18:08 -0800309 ret = sg_alloc_table(table, 1, GFP_KERNEL);
310 if (ret) {
311 kfree(table);
312 return ERR_PTR(ret);
313 }
314 sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
315 0);
316 return table;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700317}
318
Rebecca Schultz Zavinb1790672012-06-14 15:08:53 -0700319void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
320 struct ion_buffer *buffer)
321{
322 sg_free_table(buffer->sg_table);
323 kfree(buffer->sg_table);
324}
325
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700326int ion_system_contig_heap_map_user(struct ion_heap *heap,
327 struct ion_buffer *buffer,
Laura Abbottb14ed962012-01-30 14:18:08 -0800328 struct vm_area_struct *vma)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700329{
330 unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
Laura Abbott894fd582011-08-19 13:33:56 -0700331
Laura Abbottb14ed962012-01-30 14:18:08 -0800332 if (ION_IS_CACHED(buffer->flags))
Laura Abbott894fd582011-08-19 13:33:56 -0700333 return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700334 vma->vm_end - vma->vm_start,
335 vma->vm_page_prot);
Laura Abbott894fd582011-08-19 13:33:56 -0700336 else {
337 pr_err("%s: cannot map system heap uncached\n", __func__);
338 return -EINVAL;
339 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700340}
341
Olav Haugan3d4fe1a2012-01-13 11:42:15 -0800342static int ion_system_contig_print_debug(struct ion_heap *heap,
Olav Haugan0671b9a2012-05-25 11:58:56 -0700343 struct seq_file *s,
344 const struct rb_root *unused)
Laura Abbott68c80642011-10-21 17:32:27 -0700345{
Olav Haugan3d4fe1a2012-01-13 11:42:15 -0800346 seq_printf(s, "total bytes currently allocated: %lx\n",
347 (unsigned long) atomic_read(&system_contig_heap_allocated));
348
349 return 0;
Laura Abbott68c80642011-10-21 17:32:27 -0700350}
351
Rohit Vaswani35edc882012-11-20 10:20:47 -0800352void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
353 struct ion_buffer *buffer)
354{
355 return buffer->priv_virt;
356}
357
/* No-op: map_kernel created no mapping, so there is nothing to undo. */
void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}
363
/* Heap operations for the physically contiguous (kmalloc-backed) heap. */
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.print_debug = ion_system_contig_print_debug,
};
375
Olav Haugan85c95402012-05-30 17:32:37 -0700376struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700377{
378 struct ion_heap *heap;
379
380 heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
381 if (!heap)
382 return ERR_PTR(-ENOMEM);
383 heap->ops = &kmalloc_ops;
384 heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700385 return heap;
386}
387
/* Release a heap created by ion_system_contig_heap_create(). */
void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
392