/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether the heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @heap_request_region:	function pointer to call when the first
 *				mapping of memory occurs.
 * @heap_release_region:	function pointer to call when the last
 *				mapping of memory is unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count:	the total number of times this heap has been
 *				mapped in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt:	reference count of outstanding protect requests.
 * @cpu_addr:	kernel address of the backing memory (CMA heaps only).
 * @heap_size:	size of the heap's backing memory.
 * @handle:	DMA handle of the backing memory (CMA heaps only).
 * @cma:	set to 1 if the heap is backed by CMA, 0 otherwise.
 * @allow_non_secure_allocation:	set to 1 if the heap accepts
 *					non-secure allocations.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

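/*
 * CMA-backed heaps allocate their backing memory on demand.
 * dma_alloc_attrs() can fail transiently while CMA migrates pages out
 * of the region, so the allocation is retried a bounded number of
 * times with a short sleep in between.
 */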
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	/* Free with the same attributes the memory was allocated with. */
	dma_free_attrs(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle, &attrs);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* Release the backing memory with the attributes it was
	 * allocated with. */
	dma_free_attrs(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle, &attrs);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

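/*
 * First-alloc/last-free hooks: a CMA-backed heap only holds its
 * backing memory while there are outstanding allocations (or while the
 * heap is protected), so the memory is claimed on the first allocation
 * and handed back to CMA on the last free.
 */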
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is unsecured. For CMA-backed heaps this
 * also ensures the backing memory has been allocated before it is
 * protected.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure the heap is backed by memory before
		 * protecting it. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is secured. For CMA-backed heaps this
 * also releases the backing memory once the heap has no allocations
 * left.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

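/*
 * Note on locking: allocated_bytes and the protection state are
 * updated under cp_heap->lock, but the gen_pool allocation itself runs
 * outside it; the gen_pool API carries its own internal locking.
 */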
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first allocation, set up the heap's
	 * backing memory
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

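/*
 * Freeing is the mirror image of allocation: the range goes back to
 * the gen_pool first, then the byte accounting is updated under the
 * heap lock, and the last free of an unprotected heap releases the
 * CMA backing.
 */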
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
			struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

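/*
 * Each allocation carries a struct ion_cp_buffer in buffer->priv_virt,
 * tracking the physical address plus per-buffer secure state and
 * mapping counts.
 */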
static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	/*
	 * we never want Ion to fault pages in for us with this
	 * heap. We want to set up the mappings ourselves in .map_user
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

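/*
 * Build the sg_table for a buffer. The chunk size sets the granularity
 * of the scatterlist entries: cached buffers are described page by
 * page, secure buffers that are a multiple of 1MB use 1MB chunks
 * (presumably matching the granularity at which memory is assigned to
 * the secure environment), and everything else is described as a
 * single contiguous chunk.
 */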
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	return ion_create_chunked_sg_table(buf->buffer, chunk_size,
					buffer->size);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

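/*
 * Kernel mappings come in two flavours: CMA-backed heaps are mapped
 * with vmap() over the pages backing the buffer, while carveout heaps
 * (memory the kernel has no direct mapping for) go through
 * ioremap()/ioremap_cached(). Cached kernel mappings are refused while
 * the heap is protected.
 */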
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				/* Balance the request above. */
				ion_cp_release_region(cp_heap);
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							buffer->size);
			else
				ret_value = ioremap(buf->buffer,
							buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}

	return 0;
}

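/*
 * Securing is refused while user-space or cached kernel mappings are
 * outstanding; uncached kernel mappings are tolerated, presumably
 * because they cannot leave dirty lines in the cache.
 */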
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
			cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

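/*
 * Heap construction: the pool for a carveout heap is created here from
 * the platform-supplied base/size, while a CMA heap defers pool
 * creation to the first allocation. A minimal sketch of the platform
 * data such a heap consumes (field values are illustrative, not taken
 * from any particular board file):
 *
 *	static struct ion_cp_heap_pdata cp_mm_pdata = {
 *		.permission_type = IPT_TYPE_MM_CARVEOUT,
 *		.allow_nonsecure_alloc = 1,
 *	};
 *
 *	static struct ion_platform_heap cp_mm_heap = {
 *		.id = ION_CP_MM_HEAP_ID,
 *		.type = ION_HEAP_TYPE_CP,
 *		.name = "mm",
 *		.size = SZ_64M,
 *		.extra_data = &cp_mm_pdata,
 *	};
 */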
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = NULL;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
				heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* A CMA heap that has released its backing memory has no pool. */
	if (cp_heap->pool)
		gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}