/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <mach/memory.h>
#include "ion_priv.h"

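/*
 * The system heap backs each buffer with vmalloc_user() memory:
 * discontiguous pages that come zeroed and flagged (VM_USERMAP) so they
 * can later be handed to userspace via remap_vmalloc_range().
 */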
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	buffer->priv_virt = vmalloc_user(size);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	vfree(buffer->priv_virt);
}

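/*
 * Build a scatterlist for DMA by walking the buffer's vmalloc area one
 * page at a time; every entry is a single PAGE_SIZE page, since the
 * backing memory is discontiguous.
 */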
struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
					    struct ion_buffer *buffer)
{
	struct scatterlist *sglist;
	struct page *page;
	int i;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	void *vaddr = buffer->priv_virt;

	sglist = vmalloc(npages * sizeof(struct scatterlist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);
	sg_init_table(sglist, npages);	/* also zeroes the table */
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page)
			goto end;
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}
	/* XXX do cache maintenance for dma? */
	return sglist;
end:
	vfree(sglist);
	/* return an ERR_PTR here too, matching the allocation failure above */
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	/* XXX undo cache maintenance for dma? */
	vfree(buffer->sglist);	/* vfree(NULL) is a no-op */
}

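/*
 * Only cached mappings are allowed: the pages already have a cached
 * kernel mapping through the vmalloc area, and adding an uncached alias
 * for the same physical pages would give them conflicting memory
 * attributes.
 */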
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long flags)
{
	if (ION_IS_CACHED(flags))
		return buffer->priv_virt;
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return ERR_PTR(-EINVAL);
	}
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
}

int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma, unsigned long flags)
{
	if (ION_IS_CACHED(flags))
		return remap_vmalloc_range(vma, buffer->priv_virt,
					   vma->vm_pgoff);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

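/*
 * Cache maintenance for the discontiguous heap must be done a page at a
 * time: each page's physical address is looked up individually and the
 * requested operation is applied to that PAGE_SIZE chunk.
 */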
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long vstart, pstart;
	struct page *page;
	void *vtemp;
	unsigned long ln = 0;
	void (*op)(unsigned long, unsigned long, unsigned long);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = clean_caches;
		break;
	case ION_IOC_INV_CACHES:
		op = invalidate_caches;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = clean_and_invalidate_caches;
		break;
	default:
		return -EINVAL;
	}

	for (vtemp = buffer->priv_virt + offset,
	     vstart = (unsigned long) vaddr;
	     ln < length;
	     vtemp += PAGE_SIZE, ln += PAGE_SIZE,
	     vstart += PAGE_SIZE) {
		/*
		 * Test the struct page pointer itself: page_to_phys() on a
		 * NULL page yields a bogus non-zero value, so checking the
		 * translated address would never catch a failed lookup.
		 */
		page = vmalloc_to_page(vtemp);
		if (!page) {
			WARN(1, "Could not translate %p to physical address\n",
				vtemp);
			return -EINVAL;
		}
		pstart = page_to_phys(page);

		op(vstart, PAGE_SIZE, pstart);
	}

	return 0;
}
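
/*
 * Operation table for the discontiguous system heap; the ion core
 * dispatches all buffer operations through it.
 */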
static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.cache_op = ion_system_heap_cache_ops,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
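
/*
 * A minimal registration sketch, not part of this file: platform code is
 * expected to create the heap and add it to an ion_device, roughly as
 * below (idev and the error handling are placeholders).
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *	if (IS_ERR_OR_NULL(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */

/*
 * The "system contig" heap below is the physically contiguous
 * counterpart: it allocates with kzalloc(), so a buffer has one physical
 * address and can be described by a single scatterlist entry, at the cost
 * of being limited to sizes the page allocator can satisfy contiguously.
 */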
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

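/*
 * One scatterlist entry is enough here: the kzalloc() buffer is
 * physically contiguous, so a single entry of buffer->size covers it.
 */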
struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						   struct ion_buffer *buffer)
{
	struct scatterlist *sglist;

	/*
	 * vmalloc() rather than kmalloc(): this table is freed by the
	 * shared ion_system_heap_unmap_dma(), which uses vfree().
	 */
	sglist = vmalloc(sizeof(struct scatterlist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);
	sg_init_table(sglist, 1);
	sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
	return sglist;
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma,
				    unsigned long flags)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(flags))
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

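/*
 * With contiguous memory the whole [offset, offset + length) range can be
 * cleaned or invalidated in one call; no per-page walk is required.
 */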
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long vstart, pstart;

	pstart = virt_to_phys(buffer->priv_virt) + offset;
	if (!pstart) {
		WARN(1, "Could not do virt to phys translation on %p\n",
			buffer->priv_virt);
		return -EINVAL;
	}

	vstart = (unsigned long) vaddr;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		clean_caches(vstart, length, pstart);
		break;
	case ION_IOC_INV_CACHES:
		invalidate_caches(vstart, length, pstart);
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		clean_and_invalidate_caches(vstart, length, pstart);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
268
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700269static struct ion_heap_ops kmalloc_ops = {
270 .allocate = ion_system_contig_heap_allocate,
271 .free = ion_system_contig_heap_free,
272 .phys = ion_system_contig_heap_phys,
273 .map_dma = ion_system_contig_heap_map_dma,
274 .unmap_dma = ion_system_heap_unmap_dma,
275 .map_kernel = ion_system_heap_map_kernel,
276 .unmap_kernel = ion_system_heap_unmap_kernel,
277 .map_user = ion_system_contig_heap_map_user,
Laura Abbottabcb6f72011-10-04 16:26:49 -0700278 .cache_op = ion_system_contig_heap_cache_ops,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700279};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
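
/*
 * A hypothetical in-kernel usage sketch; the client API lives in
 * include/linux/ion.h and exact signatures vary between trees of this
 * era. idev, the heap mask, and flags are placeholders (mappings from
 * these heaps additionally require a cached flag, cf. ION_IS_CACHED()
 * above):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "example");
 *	handle = ion_alloc(client, SZ_64K, SZ_4K, flags);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */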