/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure buffer API
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};

/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(handle);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}

/* ION CMA heap operations functions */
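/*
 * Carve a buffer out of the heap's CMA region with
 * DMA_ATTR_NO_KERNEL_MAPPING set, build a one-entry sg_table for it and
 * record the bookkeeping needed by the secure buffer API.
 */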
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), GFP_KERNEL,
					 &attrs);
	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	if (ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len))
		goto free_table;

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

free_table:
	kfree(info->table);
free_mem:
	dma_free_attrs(dev, len, info->cpu_addr, info->handle, &attrs);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

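/*
 * Heap allocate op: only uncached allocations carrying ION_FLAG_SECURE
 * are allowed from this heap; everything else is rejected up front.
 */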
static int ion_secure_cma_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}

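/*
 * Heap free op: return the buffer to the CMA region and drop the
 * sg_table and bookkeeping allocated in __ion_secure_cma_allocate().
 */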
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory with the same attributes it was allocated with */
	dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle, &attrs);
	/* release sg table */
	sg_free_table(info->table);
	kfree(info->table);
	kfree(info);
}

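/* Report the physical (DMA) address and size backing the buffer. */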
static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

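/* The sg_table was built at allocation time, so just hand it back. */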
struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

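/* Nothing to do: the sg_table lives until ion_secure_cma_free(). */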
void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

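/* Secure buffers must never be mapped into userspace. */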
static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	pr_info("%s: mmapping from secure heap %s disallowed\n",
		__func__, mapper->name);
	return -EINVAL;
}

static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
		__func__, heap->name);
	return NULL;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	return;
}

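/*
 * Dump the per-client memory map for this heap into the debugfs
 * seq_file, one row per mem_map_data entry.
 */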
static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

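/*
 * CPU mappings (kernel and userspace) are refused above; securing and
 * unsecuring are delegated to the content-protection helpers.
 */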
static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

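/*
 * Create the heap. data->priv is expected to hold the struct device
 * that owns the reserved CMA region backing this heap.
 */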
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Store the device as the heap's private data; it is later used
	 * to link the heap with its reserved CMA memory.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	return heap;
}

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}