/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

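/*
 * Per-chunk bookkeeping for an in-progress allocation.  To avoid a
 * separate kmalloc per chunk, this struct is stored (via kmap) in the
 * zeroed first page of the chunk it describes; the mapping must be
 * torn down with kunmap() before the chunk is handed out or freed.
 */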
struct page_info {
	struct page *page;
	unsigned long order;
	struct list_head list;
};

static struct page_info *alloc_largest_available(unsigned long size)
{
	static const unsigned int orders[] = {8, 4, 0};
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (1 << orders[i]) * PAGE_SIZE)
			continue;
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP |
				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
		if (!page)
			continue;
		split_page(page, orders[i]);
		/* stash the bookkeeping info in the (zeroed) first page */
		info = kmap(page);
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

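/*
 * Build a buffer out of the largest chunks the page allocator will
 * give us, falling back down the order list on each failure, so large
 * buffers end up backed by as few, as-large-as-possible chunks.
 */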
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i;
	long size_remaining = PAGE_ALIGN(size);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(size_remaining);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		for (i = 0; i < (1 << info->order); i++) {
			sg_set_page(sg, page + i, PAGE_SIZE, 0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		/* info lives in the first page of the chunk; clear it and
		 * drop the mapping now that the pages are in the table */
		memset(info, 0, sizeof(struct page_info));
		kunmap(page);
	}

	/* make the freshly zeroed pages visible to devices (note this
	 * call is made with a NULL struct device here) */
	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		unsigned long order = info->order;

		list_del(&info->list);
		/* unmap info before its backing page is freed */
		kunmap(page);
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	}
	return -ENOMEM;
}

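/*
 * Each scatterlist entry holds exactly one order-0 page (allocate()
 * split_page()s every chunk), so freeing entry by entry is safe here.
 */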
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

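/*
 * Map the whole buffer into the kernel with vmap().  The buffer's
 * cache flag picks the page protection: cached buffers get normal
 * kernel mappings, uncached ones are mapped write-combined.
 */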
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = kzalloc(sizeof(struct page *) * npages,
				      GFP_KERNEL);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	kfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

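/*
 * Map the buffer into a user vma page by page.  vm_pgoff counts in
 * pages, and each sg entry in this heap covers exactly one page, so
 * skipping one entry per unit of offset is correct here.
 */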
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				      sg_dma_len(sg), vma->vm_page_prot);
		if (ret)
			return ret;
		addr += sg_dma_len(sg);
	}
	return 0;
}

static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

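/*
 * The "contig" system heap backs each buffer with a single kzalloc()
 * allocation, so the memory is physically contiguous and the virtual
 * address in priv_virt translates directly with virt_to_phys().
 */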
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

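/* a physically contiguous buffer maps to a single-entry sg table */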
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}