/*
 * drivers/gpu/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"

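/* Returned by ion_cma_allocate() on every failure path. */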
#define ION_CMA_ALLOCATE_FAILED -1

struct ion_cma_buffer_info {
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

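/*
 * Whether the SoC has an outer cache (e.g. a PL310 L2) that must also be
 * maintained in ion_cma_cache_ops(); set from the platform heap data in
 * ion_cma_heap_create().
 */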
static int cma_heap_has_outer_cache;
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %lu\n", len);

	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

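	/*
	 * Uncached buffers get a write-combined CPU mapping; cached
	 * buffers use dma_alloc_nonconsistent() so the CPU mapping is
	 * cacheable, with coherence managed via ion_cma_cache_ops().
	 */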
	if (!ION_IS_CACHED(flags))
		info->cpu_addr = dma_alloc_writecombine(dev, len,
							&(info->handle), 0);
	else
		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
							 &(info->handle), 0);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	info->is_cached = ION_IS_CACHED(flags);

	if (ion_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len))
		goto free_table;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return 0;

free_table:
	kfree(info->table);
free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

static void ion_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	/* release sg table */
	kfree(info->table);
	kfree(info);
}

/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address 0x%lx\n", buffer,
		(unsigned long)info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

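/*
 * The scatter-list is built once at allocation time, so mapping for DMA
 * just hands back the existing table and unmapping has nothing to do.
 */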
struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_cma_heap_unmap_dma(struct ion_heap *heap,
			    struct ion_buffer *buffer)
{
}

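/*
 * Userspace mappings must match the allocation attributes: cacheable
 * for cached buffers, write-combined otherwise.
 */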
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct device *dev = buffer->heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	if (info->is_cached)
		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
					      info->handle, buffer->size);
	else
		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
					     info->handle, buffer->size);
}

static void *ion_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	return info->cpu_addr;
}

static void ion_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
}

int ion_cma_map_iommu(struct ion_buffer *buffer,
		      struct ion_iommu_map *data,
		      unsigned int domain_num,
		      unsigned int partition_num,
		      unsigned long align,
		      unsigned long iova_length,
		      unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		return 0;
	}

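	/*
	 * The caller may ask for an IOVA window larger than the buffer;
	 * the surplus is backed by msm_iommu_map_extra() further down.
	 */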
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

int ion_cma_cache_ops(struct ion_heap *heap,
		      struct ion_buffer *buffer, void *vaddr,
		      unsigned int offset, unsigned int length,
		      unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

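	/*
	 * Do the inner-cache maintenance by virtual address here, and
	 * remember the matching outer-cache operation to run by physical
	 * address below if an outer cache is present.
	 */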
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}

static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_dma = ion_cma_heap_map_dma,
	.unmap_dma = ion_cma_heap_unmap_dma,
	.phys = ion_cma_phys,
	.map_user = ion_cma_mmap,
	.map_kernel = ion_cma_map_kernel,
	.unmap_kernel = ion_cma_unmap_kernel,
	.map_iommu = ion_cma_map_iommu,
	.unmap_iommu = ion_cma_unmap_iommu,
	.cache_op = ion_cma_cache_ops,
};

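/*
 * A minimal sketch of how a board file might register this heap through
 * its ion platform data (ION_CMA_HEAP_ID and cma_device are illustrative,
 * not defined here):
 *
 *	static struct ion_platform_heap heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_DMA,
 *			.id   = ION_CMA_HEAP_ID,
 *			.name = "cma",
 *			.priv = &cma_device.dev,
 *		},
 *	};
 *
 * The .priv device is what ion_cma_heap_create() stores in heap->priv,
 * and every dma_alloc_*() call above is made against it.
 */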
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_cma_ops;
	/*
	 * Set the device as the private heap data; it will later be
	 * used to make the link with the reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_DMA;
	cma_heap_has_outer_cache = data->has_outer_cache;
	return heap;
}

void ion_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}