/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

static int cma_heap_has_outer_cache;
/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}

/* ION CMA heap operations functions */
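/*
 * Back an ION buffer with contiguous memory from the device's CMA region:
 * allocate via the DMA API (write-combined or non-consistent depending on
 * the cache flags), build a single-entry sg_table for it, and stash the
 * bookkeeping in buffer->priv_virt for later release.
 */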
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	if (!ION_IS_CACHED(flags))
		info->cpu_addr = dma_alloc_writecombine(dev, len,
							&(info->handle), 0);
	else
		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
							&(info->handle), 0);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto err_free_mem;
	}

	info->is_cached = ION_IS_CACHED(flags);

	ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len);

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

err_free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

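/*
 * Heap allocate operation: only ION_SECURE requests are honoured here.
 * On success the embedded ion_cp_buffer is initialised so the content
 * protection (secure buffer) API can manage the allocation.
 */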
static int ion_secure_cma_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}

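/* Release the DMA memory, the scatter-list and the bookkeeping structure. */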
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	/* release sg table */
	kfree(info->table);
	kfree(info);
}

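/* Report the physical (DMA) address and size backing this buffer. */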
static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

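/* The sg_table was built at allocation time; just hand it back. */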
struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	return;
}

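/* Secure buffers are never mapped into userspace, so map_user always fails. */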
static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	return -EINVAL;
}

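/*
 * Kernel mappings reuse the CPU address obtained from the DMA API at
 * allocation time; only a mapping count is maintained here.
 */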
static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	atomic_inc(&info->secure.map_cnt);
	return info->cpu_addr;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	atomic_dec(&info->secure.map_cnt);
	return;
}

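/*
 * Map the buffer into the requested IOMMU domain/partition: reserve an
 * IOVA range of iova_length bytes, map the buffer itself, then map the
 * remaining padding with msm_iommu_map_extra. When no IOMMU is in use,
 * the physical address is returned directly.
 */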
int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
			struct ion_iommu_map *data,
			unsigned int domain_num,
			unsigned int partition_num,
			unsigned long align,
			unsigned long iova_length,
			unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

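/* Tear down the IOMMU mapping and return the IOVA range to the allocator. */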
void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}

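/*
 * Perform the requested cache maintenance: L1 maintenance is done on the
 * kernel virtual range, and L2 (outer cache) maintenance, when present,
 * is done on the corresponding physical range.
 */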
int ion_secure_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}

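/* Dump the heap's client memory map, one entry per line, to the seq_file. */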
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.map_iommu = ion_secure_cma_map_iommu,
	.unmap_iommu = ion_secure_cma_unmap_iommu,
	.cache_op = ion_secure_cma_cache_ops,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

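/*
 * Instantiate a secure CMA heap: the backing device arrives in data->priv,
 * and the outer-cache capability is recorded for later cache maintenance.
 */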
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Set the device as the heap's private data; it is used later to
	 * link the heap with its reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	cma_heap_has_outer_cache = data->has_outer_cache;
	return heap;
}

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}