/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(handle);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}

/* ION CMA heap operations functions */
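/*
 * Carve a physically contiguous buffer out of the CMA region backing
 * this heap. The allocation uses DMA_ATTR_NO_KERNEL_MAPPING, so only
 * the dma/physical handle is relied upon; a single-entry sg_table is
 * built for the buffer and the bookkeeping struct is stored in
 * buffer->priv_virt for later release.
 */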
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	if (ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len))
		goto free_table;

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

free_table:
	kfree(info->table);
free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

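/*
 * Heap entry point for allocation: only uncached, ION_FLAG_SECURE
 * requests are honoured; everything else is rejected before touching
 * the CMA region.
 */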
static int ion_secure_cma_allocate(struct ion_heap *heap,
			struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}

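/*
 * Undo __ion_secure_cma_allocate(): release the CMA buffer, then the
 * sg_table and bookkeeping struct created at allocation time.
 */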
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	/* release sg table */
	kfree(info->table);
	kfree(info);
}

static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

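/*
 * DMA mapping is effectively a no-op for this heap: the single-entry
 * sg_table built at allocation time is handed back unchanged.
 */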
struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	return;
}

static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	pr_info("%s: mmapping from secure heap %s disallowed\n",
		__func__, mapper->name);
	return -EINVAL;
}

static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
		__func__, heap->name);
	return NULL;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	return;
}

static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

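/*
 * CPU mappings (kernel and user) are refused above; buffers from this
 * heap are reached through their physical address and are secured and
 * unsecured via the ion_cp_* helpers.
 */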
static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Set the device as the heap's private data; it is later used to
	 * link the heap with its reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	return heap;
}

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}