/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

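/*
 * Per-buffer state for this heap: the ion_cp_buffer used by the secure
 * (content protection) API, the CMA allocation itself (cpu_addr/handle)
 * and a one-entry sg_table describing it.
 */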
struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

/*
 * Create a scatter-list for the already allocated DMA buffer.
 * Since the buffer is physically contiguous, a single entry is enough.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(handle);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}


/* ION CMA heap operations functions */
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
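	/*
	 * Secure buffers must never be mapped into the kernel, so allocate
	 * with DMA_ATTR_NO_KERNEL_MAPPING; cpu_addr is then only an opaque
	 * cookie for the DMA API, not a usable kernel virtual address.
	 */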
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Failed to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Failed to allocate sg table\n");
		goto free_mem;
	}

	ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len);

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

free_mem:
	/* undo the CMA allocation if building the sg_table failed */
	dma_free_attrs(dev, len, info->cpu_addr, info->handle, &attrs);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

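/*
 * Heap allocate entry point: only uncached, ION_FLAG_SECURE requests are
 * accepted; everything else is rejected before touching CMA.
 */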
static int ion_secure_cma_allocate(struct ion_heap *heap,
			struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

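	/*
	 * On success, initialise the content-protection state: the buffer
	 * starts unsecured (secure_cnt == 0), does not use delayed unsecure,
	 * and is flagged as belonging to a secure heap.
	 */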
	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}

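/*
 * Free op: release the CMA allocation, then the sg_table and the
 * per-buffer bookkeeping allocated in __ion_secure_cma_allocate().
 */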
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	sg_free_table(info->table);
	/* release sg table */
	kfree(info->table);
	kfree(info);
}

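/*
 * phys op: the buffer is physically contiguous, so just report the CMA
 * handle and the buffer size.
 */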
static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

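/*
 * map_dma/unmap_dma: the single-entry sg_table is built once at allocation
 * time, so map simply hands it back and unmap has nothing to do.
 */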
struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	return;
}

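/*
 * CPU access to secure buffers is forbidden: both userspace mmap and
 * kernel mappings are refused outright.
 */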
static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	pr_info("%s: mmapping from secure heap %s disallowed\n",
		__func__, mapper->name);
	return -EINVAL;
}

static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
		__func__, heap->name);
	return NULL;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	return;
}

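/*
 * Debugfs helper: dump the per-client memory map for this heap, one line
 * per allocation with start/end addresses and size.
 */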
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

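/*
 * Heap operations: securing and unsecuring are delegated to the common
 * ion_cp helpers, which expect the buffer's private data to start with a
 * struct ion_cp_buffer (hence the layout of ion_secure_cma_buffer_info).
 */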
static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Store the device as the heap's private data; it is used later to
	 * link the heap with its reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	return heap;
}
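
/*
 * Usage sketch (illustrative only, not part of this driver): a board file
 * would typically describe this heap in its ion_platform_data and point
 * .priv at the device that owns the reserved CMA region. The id, name and
 * device below are hypothetical placeholders:
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id	= ION_CP_MM_HEAP_ID,
 *			.type	= ION_HEAP_TYPE_SECURE_DMA,
 *			.name	= "secure_cma",
 *			.priv	= &example_cma_device.dev,
 *		},
 *	};
 */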

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}