/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

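/*
 * Per-buffer bookkeeping for this heap: the CPU address and DMA handle
 * returned by the DMA API, plus the sg_table handed out by the map_dma op.
 */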
struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

static int cma_heap_has_outer_cache;
/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}

/* ION CMA heap operations functions */
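/*
 * Carve a buffer out of the heap's CMA region via the DMA API (with no
 * kernel mapping requested) and record the allocation in a
 * struct ion_secure_cma_buffer_info hung off buffer->priv_virt.
 */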
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %lu\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Failed to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Failed to allocate sg table\n");
		goto err_free_mem;
	}

	if (ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len)) {
		dev_err(dev, "Failed to build sg table\n");
		goto err_free_table;
	}

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

err_free_table:
	kfree(info->table);
err_free_mem:
	/* release the DMA allocation before bailing out */
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

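/*
 * Heap allocate op: only uncached ION_SECURE requests are honoured; the
 * actual carve-out is delegated to __ion_secure_cma_allocate() and the
 * secure-buffer bookkeeping (lock, secure_cnt, is_secure) is initialised
 * here.
 */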
static int ion_secure_cma_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}

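/* Free op: release the DMA allocation and the bookkeeping structures. */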
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	/* release sg table */
	kfree(info->table);
	kfree(info);
}

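/* Report the buffer's physical (DMA) address and size. */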
static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

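/*
 * The sg_table describing the buffer is built at allocation time, so
 * map_dma simply hands it back and unmap_dma has nothing to undo.
 */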
struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

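/* Userspace mappings of secure buffers are not permitted. */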
static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	pr_info("%s: mmapping from secure heap %s disallowed\n",
		__func__, mapper->name);
	return -EINVAL;
}

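/* Kernel mappings of secure buffers are likewise not permitted. */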
static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
		__func__, heap->name);
	return NULL;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	return;
}

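/*
 * Map the buffer into the requested IOMMU domain/partition.  An IOVA
 * range of iova_length bytes is reserved, the buffer is mapped at its
 * start, and any remaining space is handled by msm_iommu_map_extra().
 */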
int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

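/* Tear down the IOMMU mapping and release the IOVA range. */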
void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}

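/* Cache maintenance operations are not permitted on secure buffers. */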
int ion_secure_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	pr_info("%s: cache operations disallowed from secure heap %s\n",
		__func__, heap->name);
	return -EINVAL;
}

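/* Dump the heap's client memory map for debugfs. */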
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.map_iommu = ion_secure_cma_map_iommu,
	.unmap_iommu = ion_secure_cma_unmap_iommu,
	.cache_op = ion_secure_cma_cache_ops,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

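/*
 * Create a secure CMA heap instance from the platform heap data; the
 * matching destroy routine below simply frees it.
 */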
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Set the device as the heap's private data; it is used later to
	 * make the link with the reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	cma_heap_has_outer_cache = data->has_outer_cache;
	return heap;
}

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}