/*
 * drivers/gpu/ion/ion_secure_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"
#include "msm/ion_cp_common.h"

#define ION_CMA_ALLOCATE_FAILED NULL

struct ion_secure_cma_buffer_info {
	/*
	 * This needs to come first for compatibility with the secure
	 * buffer API.
	 */
	struct ion_cp_buffer secure;
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
	bool is_cached;
};
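
/*
 * Because 'secure' is the first member, buffer->priv_virt can be viewed
 * through the secure buffer API as a bare struct ion_cp_buffer.
 * Illustrative sketch only (not code used by this driver):
 *
 *	struct ion_cp_buffer *secure = buffer->priv_virt;
 *	struct ion_secure_cma_buffer_info *info =
 *		container_of(secure, struct ion_secure_cma_buffer_info,
 *			     secure);
 */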

static int cma_heap_has_outer_cache;
/*
 * Create a scatterlist for the already allocated DMA buffer.
 * This function can be replaced by dma_common_get_sgtable()
 * once it becomes available.
 */
int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	/*
	 * The buffer is allocated with DMA_ATTR_NO_KERNEL_MAPPING, so
	 * cpu_addr is an opaque cookie rather than a kernel virtual
	 * address and must not be passed to virt_to_page(). Derive the
	 * page from the DMA handle instead; this is valid here because
	 * the CMA allocation is physically contiguous and the handle is
	 * not behind an IOMMU.
	 */
	struct page *page = phys_to_page(handle);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sgt->sgl) = handle;
	return 0;
}
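
/*
 * Illustrative caller of the helper above; 'dev', 'cpu_addr', 'handle'
 * and 'len' are assumed to come from a prior dma_alloc_attrs() call.
 * CMA memory is physically contiguous, so the resulting table has a
 * single entry covering the whole buffer:
 *
 *	struct sg_table sgt;
 *
 *	if (!ion_secure_cma_get_sgtable(dev, &sgt, cpu_addr, handle, len)) {
 *		pr_debug("buffer at %pa, %u bytes\n",
 *			 &sg_dma_address(sgt.sgl), sgt.sgl->length);
 *		sg_free_table(&sgt);
 *	}
 */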

/* ION CMA heap operations functions */
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			struct ion_heap *heap, struct ion_buffer *buffer,
			unsigned long len, unsigned long align,
			unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);

	/* No kernel mapping: the secure buffer must not be CPU-accessible. */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %lu\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &info->handle, 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Failed to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Failed to allocate sg table\n");
		goto err_free_mem;
	}

	if (ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len)) {
		dev_err(dev, "Failed to populate sg table\n");
		goto err_free_table;
	}

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

err_free_table:
	kfree(info->table);
err_free_mem:
	dma_free_attrs(dev, len, info->cpu_addr, info->handle, &attrs);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}

static int ion_secure_cma_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}

	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		return 0;
	} else {
		return -ENOMEM;
	}
}
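
/*
 * A client allocation against this heap must pass ION_FLAG_SECURE and
 * must not request cached memory. Hypothetical sketch; 'client' and the
 * choice of ION_CP_MM_HEAP_ID are assumptions, not defined in this file:
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, SZ_4K,
 *			   ION_HEAP(ION_CP_MM_HEAP_ID), ION_FLAG_SECURE);
 *	if (IS_ERR_OR_NULL(handle))
 *		pr_err("secure CMA allocation failed\n");
 */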

static void ion_secure_cma_free(struct ion_buffer *buffer)
{
	struct device *dev = buffer->heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/*
	 * Free with the same attributes used at allocation time;
	 * info->cpu_addr is a DMA_ATTR_NO_KERNEL_MAPPING cookie, so a
	 * plain dma_free_coherent() would try to tear down a kernel
	 * mapping that was never created.
	 */
	dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle, &attrs);
	/* release sg table */
	sg_free_table(info->table);
	kfree(info->table);
	kfree(info);
}

static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	/* %pa already prints the 0x prefix */
	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}

struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}

void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
}

static int ion_secure_cma_mmap(struct ion_heap *mapper,
			struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	pr_info("%s: mmapping from secure heap %s disallowed\n",
		__func__, mapper->name);
	return -EINVAL;
}

static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
		__func__, heap->name);
	return NULL;
}

static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
}

int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align,
						&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
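
/*
 * Worked example of the padding logic above (hypothetical numbers): a
 * 1 MB buffer mapped with iova_length = SZ_1M + SZ_4K leaves
 * extra = SZ_4K, so one extra 4 KB IOVA page starting at
 * data->iova_addr + SZ_1M is backed by the buffer's own physical
 * memory via msm_iommu_map_extra(), so a device that prefetches past
 * the end of the buffer does not fault.
 */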

void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}

int ion_secure_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	pr_info("%s: cache operations disallowed from secure heap %s\n",
		__func__, heap->name);
	return -EINVAL;
}

static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		/* "%16.s" has an explicit precision of 0 and prints nothing */
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
		}
	}
	return 0;
}

static struct ion_heap_ops ion_secure_cma_ops = {
	.allocate = ion_secure_cma_allocate,
	.free = ion_secure_cma_free,
	.map_dma = ion_secure_cma_heap_map_dma,
	.unmap_dma = ion_secure_cma_heap_unmap_dma,
	.phys = ion_secure_cma_phys,
	.map_user = ion_secure_cma_mmap,
	.map_kernel = ion_secure_cma_map_kernel,
	.unmap_kernel = ion_secure_cma_unmap_kernel,
	.map_iommu = ion_secure_cma_map_iommu,
	.unmap_iommu = ion_secure_cma_unmap_iommu,
	.cache_op = ion_secure_cma_cache_ops,
	.print_debug = ion_secure_cma_print_debug,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &ion_secure_cma_ops;
	/*
	 * Store the device as the heap's private data; it is used later
	 * to link the heap with its reserved CMA memory region.
	 */
	heap->priv = data->priv;
	heap->type = ION_HEAP_TYPE_SECURE_DMA;
	cma_heap_has_outer_cache = data->has_outer_cache;
	return heap;
}
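
/*
 * Hypothetical board-file usage; the names below are illustrative and
 * not defined in this driver. data->priv must point at the device that
 * owns the reserved CMA region, since __ion_secure_cma_allocate()
 * allocates through it:
 *
 *	static struct ion_platform_heap secure_heap_data = {
 *		.type = ION_HEAP_TYPE_SECURE_DMA,
 *		.name = "secure_cma",
 *		.priv = &msm_secure_cma_device.dev,
 *	};
 *
 *	struct ion_heap *heap = ion_secure_cma_heap_create(&secure_heap_data);
 *	if (IS_ERR(heap))
 *		pr_err("failed to create secure CMA heap\n");
 */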

void ion_secure_cma_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}