/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

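/*
 * One chunk of a buffer: the first of 2^order split pages, plus a list
 * node used to string chunks together while the buffer is assembled.
 */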
struct page_info {
	struct page *page;
	unsigned long order;
	struct list_head list;
};

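/*
 * Allocate the largest chunk of pages, trying orders 8, 4 and 0 in
 * turn, that still fits within @size. The compound page is split so
 * that each page can later be placed in the scatterlist and freed
 * individually. Returns NULL if every order fails.
 */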
static struct page_info *alloc_largest_available(unsigned long size)
{
	static unsigned int orders[] = {8, 4, 0};
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (1 << orders[i]) * PAGE_SIZE)
			continue;
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
		if (!page)
			continue;
		split_page(page, orders[i]);
		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			int j;

			for (j = 0; j < (1 << orders[i]); j++)
				__free_page(page + j);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

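/*
 * Build the buffer from the largest chunks available, then flatten the
 * chunks into an sg_table with one PAGE_SIZE entry per page. The table
 * is stashed in buffer->priv_virt for map_dma to hand back later.
 */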
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i;
	long size_remaining = PAGE_ALIGN(size);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(size_remaining);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		for (i = 0; i < (1 << info->order); i++) {
			sg_set_page(sg, page + i, PAGE_SIZE, 0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		memset(info, 0, sizeof(struct page_info));
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	/* free whatever chunks were gathered before the failure */
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		for (i = 0; i < (1 << info->order); i++)
			__free_page(info->page + i);
		kfree(info);
	}
	return -ENOMEM;
}

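/*
 * Hand every page in the scatterlist back to the page allocator, then
 * release the sg_table itself.
 */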
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = kzalloc(sizeof(struct page *) * npages,
				      GFP_KERNEL);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	kfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

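/*
 * Map the buffer into a userspace vma one scatterlist entry at a time,
 * skipping entries to honour the page offset passed to mmap().
 */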
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				      sg_dma_len(sg), vma->vm_page_prot);
		if (ret)
			return ret;
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

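/*
 * The system-contig heap backs each buffer with a single kzalloc()
 * allocation, so the memory is physically contiguous and its physical
 * address can be reported through the phys op.
 */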
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

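/*
 * A contiguous buffer is described by a single-entry sg_table covering
 * the whole allocation.
 */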
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}