/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>

#include <asm/mach/map.h>
#include <asm/cacheflush.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"
#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether the heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @heap_request_region:	function pointer to call when the first
 *			mapping of memory occurs.
 * @heap_release_region:	function pointer to call when the last
 *			mapping of memory is unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count:	the total number of times this heap has been
 *			mapped in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @iommu_iova:	saved iova when mapping the full heap at once.
 * @iommu_partition:	partition used to map the full heap.
 * @reusable:	indicates if the memory should be reused via fmem.
 * @reserved_vrange:	reserved virtual address range for use with fmem.
 * @iommu_map_all:	Indicates whether we should map the whole heap into
 *			the IOMMU.
 * @iommu_2x_map_domain:	Indicates the domain to use for overmapping.
 * @has_outer_cache:	set to 1 if an outer cache is used, 0 otherwise.
 * @protect_cnt:	reference count of outstanding protect requests.
 * @cpu_addr:	kernel address of the CMA backing memory, when allocated.
 * @heap_size:	size of the heap, used when allocating from CMA.
 * @handle:	DMA handle of the CMA backing memory.
 * @cma:	set to 1 if the heap is backed by CMA, 0 otherwise.
 * @disallow_non_secure_allocation:	set to 1 if only secure allocations
 *			are permitted from this heap.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int disallow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

struct ion_cp_buffer {
	phys_addr_t buffer;
	atomic_t secure_cnt;
	int is_secure;
	int want_delayed_unsecure;
	/*
	 * Currently all user/kernel mapping is protected by the heap lock.
	 * This is sufficient to protect the map count as well. The lock
	 * should be used to protect map_cnt if the whole heap lock is
	 * ever removed.
	 */
	atomic_t map_cnt;
	/*
	 * Protects secure_cnt for securing.
	 */
	struct mutex lock;
	int version;
	void *data;
};

#define DMA_ALLOC_TRIES	5

static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data);

static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
			unsigned int permission_type, int version,
			void *data);

static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

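	/*
	 * A CMA allocation of this size can fail transiently, so retry
	 * a bounded number of times with a short sleep between attempts
	 * before giving up.
	 */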
	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr)
			msleep(20);
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

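	/*
	 * Manage the CMA region with a gen_pool whose minimum allocation
	 * order is 12, i.e. a 4KB granularity.
	 */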
	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
				cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

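/*
 * The backing store for the heap is only held while there are outstanding
 * allocations: on the first allocation the fmem region is moved to C state
 * or the CMA buffer is allocated, and on the last free it is given back.
 */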
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

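/*
 * Securing is reference counted per buffer: only the 0 -> 1 transition of
 * secure_cnt issues the SCM protect call, and the matching unprotect call
 * is made when the count drops back to 0.
 */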
/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_protect_buffer(struct ion_buffer *buffer, int version,
					void *data, int flags)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (atomic_inc_return(&buf->secure_cnt) == 1) {
		ret_value = ion_cp_protect_mem(buf->buffer,
				buffer->size, 0,
				version, data);

		if (ret_value) {
			pr_err("Failed to secure buffer %p, error %d\n",
				buffer, ret_value);
			atomic_dec(&buf->secure_cnt);
		} else {
			pr_debug("Protected buffer %p from %x-%x\n",
				buffer, buf->buffer,
				buf->buffer + buffer->size);
			buf->want_delayed_unsecure |=
				flags & ION_UNSECURE_DELAYED ? 1 : 0;
			buf->data = data;
			buf->version = version;
		}
	}
	pr_debug("buffer %p protect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}

/* Must be protected by ion_cp_buffer lock */
static int __ion_cp_unprotect_buffer(struct ion_buffer *buffer, int version,
					void *data, int force_unsecure)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int ret_value = 0;

	if (force_unsecure) {
		if (!buf->is_secure || atomic_read(&buf->secure_cnt) == 0)
			return 0;

		if (atomic_read(&buf->secure_cnt) != 1) {
			WARN(1, "Forcing unsecure of buffer with outstanding secure count %d!\n",
				atomic_read(&buf->secure_cnt));
			atomic_set(&buf->secure_cnt, 1);
		}
	}

	if (atomic_dec_and_test(&buf->secure_cnt)) {
		ret_value = ion_cp_unprotect_mem(
			buf->buffer, buffer->size,
			0, version, data);

		if (ret_value) {
			pr_err("Failed to unsecure buffer %p, error %d\n",
				buffer, ret_value);
			/*
			 * If the force unsecure is happening, the buffer
			 * is being destroyed. We failed to unsecure the
			 * buffer even though the memory is given back.
			 * Just die now rather than discovering later what
			 * happens when trying to use the secured memory as
			 * unsecured...
			 */
			BUG_ON(force_unsecure);
			/* Bump the count back up one to try again later */
			atomic_inc(&buf->secure_cnt);
		} else {
			buf->version = -1;
			buf->data = NULL;
		}
	}
	pr_debug("buffer %p unprotect count %d\n", buffer,
		atomic_read(&buf->secure_cnt));
	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
	return ret_value;
}

int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
				int flags)
{
	int ret_value;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	if (!buf->is_secure) {
		pr_err("%s: buffer %p was not allocated as secure\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: buffer %p was allocated as cached\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->map_cnt)) {
		pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d",
			__func__, buffer, atomic_read(&buf->map_cnt));
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->secure_cnt)) {
		if (buf->version != version || buf->data != data) {
			pr_err("%s: Trying to re-secure buffer with different values",
				__func__);
			pr_err("Last secured version: %d Current %d\n",
				buf->version, version);
			pr_err("Last secured data: %p current %p\n",
				buf->data, data);
			ret_value = -EINVAL;
			goto out_unlock;
		}
	}
	ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&buf->lock);
	return ret_value;
}

int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure)
{
	int ret_value = 0;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	ret_value = __ion_cp_unprotect_buffer(buffer, buf->version, buf->data,
						force_unsecure);
	mutex_unlock(&buf->lock);
	return ret_value;
}

/**
 * Protects memory if the heap is an unsecured heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is a secured heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

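/*
 * Allocation path: book-keeping (allocated_bytes, first-allocation backing)
 * is done under the heap mutex, the gen_pool allocation itself runs outside
 * it, and the accounting is rolled back if the pool has no suitable run.
 */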
| 465 | ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, |
| 466 | unsigned long size, |
| 467 | unsigned long align, |
| 468 | unsigned long flags) |
| 469 | { |
| 470 | unsigned long offset; |
| 471 | unsigned long secure_allocation = flags & ION_SECURE; |
| 472 | |
| 473 | struct ion_cp_heap *cp_heap = |
| 474 | container_of(heap, struct ion_cp_heap, heap); |
| 475 | |
| 476 | mutex_lock(&cp_heap->lock); |
Olav Haugan | ea66e7a | 2012-01-23 17:30:27 -0800 | [diff] [blame] | 477 | if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) { |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 478 | mutex_unlock(&cp_heap->lock); |
| 479 | pr_err("ION cannot allocate un-secure memory from protected" |
Olav Haugan | 9b2d1c2 | 2012-01-09 15:23:08 -0800 | [diff] [blame] | 480 | " heap %s\n", heap->name); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 481 | return ION_CP_ALLOCATE_FAIL; |
| 482 | } |
| 483 | |
Laura Abbott | ac96331 | 2012-12-11 15:09:03 -0800 | [diff] [blame] | 484 | if (!secure_allocation && cp_heap->disallow_non_secure_allocation) { |
| 485 | mutex_unlock(&cp_heap->lock); |
| 486 | pr_debug("%s: non-secure allocation disallowed from this heap\n", |
| 487 | __func__); |
| 488 | return ION_CP_ALLOCATE_FAIL; |
| 489 | } |
| 490 | |
Laura Abbott | 087db59 | 2012-11-01 09:41:37 -0700 | [diff] [blame] | 491 | /* |
| 492 | * The check above already checked for non-secure allocations when the |
| 493 | * heap is protected. HEAP_PROTECTED implies that this must be a secure |
| 494 | * allocation. If the heap is protected and there are userspace or |
| 495 | * cached kernel mappings, something has gone wrong in the security |
| 496 | * model. |
| 497 | */ |
| 498 | if (cp_heap->heap_protected == HEAP_PROTECTED) { |
| 499 | BUG_ON(cp_heap->umap_count != 0); |
| 500 | BUG_ON(cp_heap->kmap_cached_count != 0); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 501 | } |
| 502 | |
Laura Abbott | caafeea | 2011-12-13 11:43:10 -0800 | [diff] [blame] | 503 | /* |
| 504 | * if this is the first reusable allocation, transition |
| 505 | * the heap |
| 506 | */ |
Laura Abbott | 94ad25e | 2012-08-03 13:56:21 -0700 | [diff] [blame] | 507 | if (!cp_heap->allocated_bytes) |
| 508 | if (ion_on_first_alloc(heap)) { |
Laura Abbott | caafeea | 2011-12-13 11:43:10 -0800 | [diff] [blame] | 509 | mutex_unlock(&cp_heap->lock); |
| 510 | return ION_RESERVED_ALLOCATE_FAIL; |
| 511 | } |
Laura Abbott | caafeea | 2011-12-13 11:43:10 -0800 | [diff] [blame] | 512 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 513 | cp_heap->allocated_bytes += size; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 514 | mutex_unlock(&cp_heap->lock); |
| 515 | |
| 516 | offset = gen_pool_alloc_aligned(cp_heap->pool, |
| 517 | size, ilog2(align)); |
| 518 | |
| 519 | if (!offset) { |
| 520 | mutex_lock(&cp_heap->lock); |
Olav Haugan | d710ed1 | 2012-04-19 14:23:04 -0700 | [diff] [blame] | 521 | cp_heap->allocated_bytes -= size; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 522 | if ((cp_heap->total_size - |
Olav Haugan | d710ed1 | 2012-04-19 14:23:04 -0700 | [diff] [blame] | 523 | cp_heap->allocated_bytes) >= size) |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 524 | pr_debug("%s: heap %s has enough memory (%lx) but" |
| 525 | " the allocation of size %lx still failed." |
Olav Haugan | 9b2d1c2 | 2012-01-09 15:23:08 -0800 | [diff] [blame] | 526 | " Memory is probably fragmented.\n", |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 527 | __func__, heap->name, |
| 528 | cp_heap->total_size - |
| 529 | cp_heap->allocated_bytes, size); |
Laura Abbott | 94ad25e | 2012-08-03 13:56:21 -0700 | [diff] [blame] | 530 | if (!cp_heap->allocated_bytes && |
| 531 | cp_heap->heap_protected == HEAP_NOT_PROTECTED) |
| 532 | ion_on_last_free(heap); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 533 | mutex_unlock(&cp_heap->lock); |
| 534 | |
| 535 | return ION_CP_ALLOCATE_FAIL; |
| 536 | } |
| 537 | |
| 538 | return offset; |
| 539 | } |
| 540 | |
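/*
 * Tear down a full-heap IOMMU mapping in fixed 64K steps; for the 2x-mapped
 * video domain the extra overmap region past the heap is unmapped as well.
 */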
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i],
						i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

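/*
 * Secure buffers whose size is a multiple of 1MB are described to clients
 * as a table of fixed 1MB chunks; everything else is exported as a single
 * contiguous sg entry.
 */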
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M)) {
		int n_chunks;
		int i;
		struct scatterlist *sg;

		/* Count number of 1MB chunks. Alignment is already checked. */
		n_chunks = buffer->size >> 20;

		ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
		if (ret)
			goto err0;

		for_each_sg(table->sgl, sg, table->nents, i) {
			sg_dma_address(sg) = buf->buffer + i * SZ_1M;
			sg->length = SZ_1M;
			sg->offset = 0;
		}
	} else {
		ret = sg_alloc_table(table, 1, GFP_KERNEL);
		if (ret)
			goto err0;

		table->sgl->length = buffer->size;
		table->sgl->offset = 0;
		table->sgl->dma_address = buf->buffer;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

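/*
 * Kernel mappings are created in one of three ways depending on the heap
 * type: a remap into the reserved fmem virtual range, a vmap() of the CMA
 * pages, or a plain ioremap of the carveout. A protected heap may only be
 * mapped uncached.
 */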
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			if (pages) {
				for (i = 0; i < npages; i++) {
					pages[i] = phys_to_page(buf->buffer +
							i * PAGE_SIZE);
				}
				ret_value = vmap(pages, npages, VM_IOREMAP,
							pgprot);
				vfree(pages);
			}
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							buffer->size);
			else
				ret_value = ioremap(buf->buffer,
							buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

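/*
 * Cache maintenance runs on the inner cache via the dmac_* range ops on the
 * kernel mapping, followed by the matching outer-cache operation on the
 * physical range when an outer cache is present.
 */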
| 900 | int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, |
| 901 | void *vaddr, unsigned int offset, unsigned int length, |
| 902 | unsigned int cmd) |
| 903 | { |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 904 | void (*outer_cache_op)(phys_addr_t, phys_addr_t); |
| 905 | struct ion_cp_heap *cp_heap = |
| 906 | container_of(heap, struct ion_cp_heap, heap); |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 907 | struct ion_cp_buffer *buf = buffer->priv_virt; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 908 | |
| 909 | switch (cmd) { |
| 910 | case ION_IOC_CLEAN_CACHES: |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 911 | dmac_clean_range(vaddr, vaddr + length); |
| 912 | outer_cache_op = outer_clean_range; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 913 | break; |
| 914 | case ION_IOC_INV_CACHES: |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 915 | dmac_inv_range(vaddr, vaddr + length); |
| 916 | outer_cache_op = outer_inv_range; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 917 | break; |
| 918 | case ION_IOC_CLEAN_INV_CACHES: |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 919 | dmac_flush_range(vaddr, vaddr + length); |
| 920 | outer_cache_op = outer_flush_range; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 921 | break; |
| 922 | default: |
| 923 | return -EINVAL; |
| 924 | } |
| 925 | |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 926 | if (cp_heap->has_outer_cache) { |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 927 | unsigned long pstart = buf->buffer + offset; |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 928 | outer_cache_op(pstart, pstart + length); |
| 929 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 930 | return 0; |
| 931 | } |

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
					   "FREE", last_end, data->addr - 1,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
				   client_name, data->addr,
				   data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				   last_end, end - 1, end - last_end,
				   end - last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
			cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

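/*
 * Map the entire heap into one IOMMU domain with 64K pages. The iova range
 * is allocated once per domain and recorded in iommu_iova[] so ion_cp_free()
 * can tear it down when the heap drains; the video domain reserves twice the
 * heap size to cover core prefetching past the end of a buffer.
 */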
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1031 | static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap, |
Olav Haugan | 3450cae | 2012-05-14 11:36:38 -0700 | [diff] [blame] | 1032 | int partition, unsigned long prot) |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1033 | { |
| 1034 | unsigned long left_to_map = cp_heap->total_size; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1035 | unsigned long page_size = SZ_64K; |
| 1036 | int ret_value = 0; |
| 1037 | unsigned long virt_addr_len = cp_heap->total_size; |
| 1038 | struct iommu_domain *domain = msm_get_iommu_domain(domain_num); |
| 1039 | |
| 1040 | /* If we are mapping into the video domain we need to map twice the
| 1041 |  * size of the heap to account for a prefetch issue in the video core.
| 1042 |  */
| 1043 | if (domain_num == cp_heap->iommu_2x_map_domain) |
| 1044 | virt_addr_len <<= 1; |
| 1045 | |
| 1046 | if (cp_heap->total_size & (SZ_64K-1)) { |
| 1047 | pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n"); |
| 1048 | ret_value = -EINVAL; |
| 1049 | } |
| 1050 | if (cp_heap->base & (SZ_64K-1)) { |
| 1051 | pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n"); |
| 1052 | ret_value = -EINVAL; |
| 1053 | } |
| 1054 | if (!ret_value && domain) { |
| 1055 | unsigned long temp_phys = cp_heap->base; |
Laura Abbott | d01221b | 2012-05-16 17:52:49 -0700 | [diff] [blame] | 1056 | unsigned long temp_iova; |
| 1057 | |
| 1058 | ret_value = msm_allocate_iova_address(domain_num, partition, |
| 1059 | virt_addr_len, SZ_64K, |
| 1060 | &temp_iova); |
| 1061 | |
| 1062 | if (ret_value) { |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1063 | pr_err("%s: could not allocate iova from domain %lu, partition %d\n", |
| 1064 | __func__, domain_num, partition); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1065 | goto out; |
| 1066 | } |
| 1067 | cp_heap->iommu_iova[domain_num] = temp_iova; |
| 1068 | |
| 1069 | while (left_to_map) { |
| 1070 | int ret = iommu_map(domain, temp_iova, temp_phys, |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 1071 | page_size, prot); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1072 | if (ret) { |
| 1073 | pr_err("%s: could not map %lx in domain %p, error: %d\n", |
| 1074 | __func__, temp_iova, domain, ret); |
| 1075 | ret_value = -EAGAIN; |
| 1076 | goto free_iova; |
| 1077 | } |
| 1078 | temp_iova += page_size; |
| 1079 | temp_phys += page_size; |
| 1080 | left_to_map -= page_size; |
| 1081 | } |
| 1082 | if (domain_num == cp_heap->iommu_2x_map_domain) |
| 1083 | ret_value = msm_iommu_map_extra(domain, temp_iova, |
| 1084 | cp_heap->total_size, |
| 1085 | SZ_64K, prot); |
| 1086 | if (ret_value) |
| 1087 | goto free_iova; |
| 1088 | } else { |
| 1089 | pr_err("Unable to get IOMMU domain %lu\n", domain_num); |
| 1090 | ret_value = -ENOMEM; |
| 1091 | } |
| 1092 | goto out; |
| 1093 | |
| 1094 | free_iova: |
| 1095 | msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num, |
| 1096 | partition, virt_addr_len); |
| 1097 | out: |
| 1098 | return ret_value; |
| 1099 | } |
| 1100 | |
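/*
 * Map one buffer into an IOMMU domain. Without an IOMMU the physical
 * address is handed back as the "iova". If the whole heap is (or can
 * be) mapped at once, the buffer's iova is simply its offset within
 * the heap mapping; otherwise an iova range is allocated, the buffer's
 * sg_table is mapped into it, and any extra requested length is backed
 * by filler pages at 4K granularity.
 */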
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1101 | static int ion_cp_heap_map_iommu(struct ion_buffer *buffer, |
| 1102 | struct ion_iommu_map *data, |
| 1103 | unsigned int domain_num, |
| 1104 | unsigned int partition_num, |
| 1105 | unsigned long align, |
| 1106 | unsigned long iova_length, |
| 1107 | unsigned long flags) |
| 1108 | { |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1109 | struct iommu_domain *domain; |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1110 | int ret = 0; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1111 | unsigned long extra; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1112 | struct ion_cp_heap *cp_heap = |
| 1113 | container_of(buffer->heap, struct ion_cp_heap, heap); |
Olav Haugan | f310cf2 | 2012-05-08 08:42:49 -0700 | [diff] [blame] | 1114 | int prot = IOMMU_WRITE | IOMMU_READ; |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1115 | struct ion_cp_buffer *buf = buffer->priv_virt; |
Olav Haugan | f310cf2 | 2012-05-08 08:42:49 -0700 | [diff] [blame] | 1116 | prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1117 | |
| 1118 | data->mapped_size = iova_length; |
| 1119 | |
| 1120 | if (!msm_use_iommu()) { |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1121 | data->iova_addr = buf->buffer; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1122 | return 0; |
| 1123 | } |
| 1124 | |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1125 | if (cp_heap->iommu_iova[domain_num]) { |
| 1126 | /* Already mapped. */ |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1127 | unsigned long offset = buf->buffer - cp_heap->base; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1128 | data->iova_addr = cp_heap->iommu_iova[domain_num] + offset; |
| 1129 | return 0; |
| 1130 | } else if (cp_heap->iommu_map_all) { |
Olav Haugan | 3450cae | 2012-05-14 11:36:38 -0700 | [diff] [blame] | 1131 | ret = iommu_map_all(domain_num, cp_heap, partition_num, prot); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1132 | if (!ret) { |
| 1133 | unsigned long offset = |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1134 | buf->buffer - cp_heap->base; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1135 | data->iova_addr = |
| 1136 | cp_heap->iommu_iova[domain_num] + offset; |
| 1137 | cp_heap->iommu_partition[domain_num] = partition_num; |
| 1138 | /*
| 1139 |  * Clear the delayed-unmap flag so that we don't interfere
| 1140 |  * with that feature (we are already delaying the unmap).
| 1141 |  */
| 1142 | data->flags &= ~ION_IOMMU_UNMAP_DELAYED; |
| 1143 | return 0; |
| 1144 | } else { |
| 1145 | cp_heap->iommu_iova[domain_num] = 0; |
| 1146 | cp_heap->iommu_partition[domain_num] = 0; |
| 1147 | return ret; |
| 1148 | } |
| 1149 | } |
| 1150 | |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1151 | extra = iova_length - buffer->size; |
| 1152 | |
Laura Abbott | d01221b | 2012-05-16 17:52:49 -0700 | [diff] [blame] | 1153 | ret = msm_allocate_iova_address(domain_num, partition_num, |
| 1154 | data->mapped_size, align, |
| 1155 | &data->iova_addr); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1156 | |
Laura Abbott | d01221b | 2012-05-16 17:52:49 -0700 | [diff] [blame] | 1157 | if (ret) |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1158 | goto out; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1159 | |
| 1160 | domain = msm_get_iommu_domain(domain_num); |
| 1161 | |
| 1162 | if (!domain) { |
| 1163 | ret = -ENOMEM; |
| 1164 | goto out1; |
| 1165 | } |
| 1166 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1167 | ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl, |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1168 | buffer->size, prot); |
| 1169 | if (ret) { |
| 1170 | pr_err("%s: could not map %lx in domain %p\n", |
| 1171 | __func__, data->iova_addr, domain); |
| 1172 | goto out1; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1173 | } |
| 1174 | |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1175 | if (extra) { |
| 1176 | unsigned long extra_iova_addr = data->iova_addr + buffer->size; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1177 | ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, |
| 1178 | SZ_4K, prot); |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1179 | if (ret) |
| 1180 | goto out2; |
| 1181 | } |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1182 | return ret; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1183 | |
| 1184 | out2: |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1185 | iommu_unmap_range(domain, data->iova_addr, buffer->size); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1186 | out1: |
| 1187 | msm_free_iova_address(data->iova_addr, domain_num, partition_num, |
| 1188 | data->mapped_size); |
| 1189 | out: |
| 1190 | return ret; |
| 1191 | } |
| 1192 | |
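/*
 * Undo ion_cp_heap_map_iommu() for one buffer. When the whole heap was
 * mapped in one shot, per-buffer unmaps are skipped; that mapping is
 * torn down elsewhere once the heap is completely freed.
 */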
| 1193 | static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data) |
| 1194 | { |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1195 | unsigned int domain_num; |
| 1196 | unsigned int partition_num; |
| 1197 | struct iommu_domain *domain; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1198 | struct ion_cp_heap *cp_heap = |
| 1199 | container_of(data->buffer->heap, struct ion_cp_heap, heap); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1200 | |
| 1201 | if (!msm_use_iommu()) |
| 1202 | return; |
| 1203 | |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1205 | domain_num = iommu_map_domain(data); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1206 | |
| 1207 | /* If we are mapping everything we'll wait to unmap until everything
| 1208 |  * is freed. */
| 1209 | if (cp_heap->iommu_iova[domain_num]) |
| 1210 | return; |
| 1211 | |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1212 | partition_num = iommu_map_partition(data); |
| 1213 | |
| 1214 | domain = msm_get_iommu_domain(domain_num); |
| 1215 | |
| 1216 | if (!domain) { |
| 1217 | WARN(1, "Could not get domain %d. Corruption?\n", domain_num); |
| 1218 | return; |
| 1219 | } |
| 1220 | |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1221 | iommu_unmap_range(domain, data->iova_addr, data->mapped_size); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1222 | msm_free_iova_address(data->iova_addr, domain_num, partition_num, |
| 1223 | data->mapped_size); |
| 1224 | |
| 1225 | return; |
| 1226 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1227 | |
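/* Heap operations advertised to the ION core. */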
| 1228 | static struct ion_heap_ops cp_heap_ops = { |
| 1229 | .allocate = ion_cp_heap_allocate, |
| 1230 | .free = ion_cp_heap_free, |
| 1231 | .phys = ion_cp_heap_phys, |
| 1232 | .map_user = ion_cp_heap_map_user, |
| 1233 | .unmap_user = ion_cp_heap_unmap_user, |
| 1234 | .map_kernel = ion_cp_heap_map_kernel, |
| 1235 | .unmap_kernel = ion_cp_heap_unmap_kernel, |
| 1236 | .map_dma = ion_cp_heap_map_dma, |
| 1237 | .unmap_dma = ion_cp_heap_unmap_dma, |
| 1238 | .cache_op = ion_cp_cache_ops, |
Olav Haugan | 3d4fe1a | 2012-01-13 11:42:15 -0800 | [diff] [blame] | 1239 | .print_debug = ion_cp_print_debug, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1240 | .secure_heap = ion_cp_secure_heap, |
| 1241 | .unsecure_heap = ion_cp_unsecure_heap, |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1242 | .map_iommu = ion_cp_heap_map_iommu, |
| 1243 | .unmap_iommu = ion_cp_heap_unmap_iommu, |
Laura Abbott | 9361930 | 2012-10-11 11:51:40 -0700 | [diff] [blame] | 1244 | .secure_buffer = ion_cp_secure_buffer, |
| 1245 | .unsecure_buffer = ion_cp_unsecure_buffer, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1246 | }; |
| 1247 | |
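/*
 * Create a content-protection heap from platform data. CMA-backed
 * heaps leave pool management to the DMA layer; carveout heaps seed a
 * genpool (order 12, i.e. 4K minimum allocation) with the reserved
 * range. extra_data, when present, supplies the secure region, SCM
 * permission type and the IOMMU mapping policy.
 */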
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1248 | struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) |
| 1249 | { |
| 1250 | struct ion_cp_heap *cp_heap; |
| 1251 | int ret; |
| 1252 | |
| 1253 | cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL); |
| 1254 | if (!cp_heap) |
| 1255 | return ERR_PTR(-ENOMEM); |
| 1256 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1257 | mutex_init(&cp_heap->lock); |
| 1258 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1260 | cp_heap->allocated_bytes = 0; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1261 | cp_heap->umap_count = 0; |
Olav Haugan | 2a5404b | 2012-02-01 17:51:30 -0800 | [diff] [blame] | 1262 | cp_heap->kmap_cached_count = 0; |
| 1263 | cp_heap->kmap_uncached_count = 0; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1264 | cp_heap->total_size = heap_data->size; |
| 1265 | cp_heap->heap.ops = &cp_heap_ops; |
Mitchel Humpherys | 362b52b | 2012-09-13 10:53:22 -0700 | [diff] [blame] | 1266 | cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP; |
Olav Haugan | ea66e7a | 2012-01-23 17:30:27 -0800 | [diff] [blame] | 1267 | cp_heap->heap_protected = HEAP_NOT_PROTECTED; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1268 | cp_heap->secure_base = heap_data->base; |
Olav Haugan | 42ebe71 | 2012-01-10 16:30:58 -0800 | [diff] [blame] | 1269 | cp_heap->secure_size = heap_data->size; |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 1270 | cp_heap->has_outer_cache = heap_data->has_outer_cache; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1271 | cp_heap->heap_size = heap_data->size; |
| 1272 | |
Laura Abbott | f68983e | 2012-06-13 16:23:23 -0700 | [diff] [blame] | 1273 | atomic_set(&cp_heap->protect_cnt, 0); |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1274 | if (heap_data->extra_data) { |
| 1275 | struct ion_cp_heap_pdata *extra_data = |
| 1276 | heap_data->extra_data; |
Laura Abbott | caafeea | 2011-12-13 11:43:10 -0800 | [diff] [blame] | 1277 | cp_heap->reusable = extra_data->reusable; |
| 1278 | cp_heap->reserved_vrange = extra_data->virt_addr; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1279 | cp_heap->permission_type = extra_data->permission_type; |
Olav Haugan | 42ebe71 | 2012-01-10 16:30:58 -0800 | [diff] [blame] | 1280 | if (extra_data->secure_size) { |
| 1281 | cp_heap->secure_base = extra_data->secure_base; |
| 1282 | cp_heap->secure_size = extra_data->secure_size; |
| 1283 | } |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1284 | if (extra_data->setup_region) |
| 1285 | cp_heap->bus_id = extra_data->setup_region(); |
| 1286 | if (extra_data->request_region) |
Laura Abbott | aedbe42 | 2012-08-03 17:06:22 -0700 | [diff] [blame] | 1287 | cp_heap->heap_request_region = |
| 1288 | extra_data->request_region; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1289 | if (extra_data->release_region) |
Laura Abbott | aedbe42 | 2012-08-03 17:06:22 -0700 | [diff] [blame] | 1290 | cp_heap->heap_release_region = |
| 1291 | extra_data->release_region; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1292 | cp_heap->iommu_map_all = |
| 1293 | extra_data->iommu_map_all; |
| 1294 | cp_heap->iommu_2x_map_domain = |
| 1295 | extra_data->iommu_2x_map_domain; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1296 | cp_heap->cma = extra_data->is_cma; |
Laura Abbott | ac96331 | 2012-12-11 15:09:03 -0800 | [diff] [blame] | 1297 | cp_heap->disallow_non_secure_allocation = |
| 1298 | extra_data->no_nonsecure_alloc; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1300 | } |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1301 | |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1302 | if (cp_heap->cma) { |
| 1303 | cp_heap->pool = NULL; |
| 1304 | cp_heap->cpu_addr = 0; |
| 1305 | cp_heap->heap.priv = heap_data->priv; |
| 1306 | } else { |
| 1307 | cp_heap->pool = gen_pool_create(12, -1); |
| 1308 | if (!cp_heap->pool) |
| 1309 | goto free_heap; |
| 1310 | |
| 1311 | cp_heap->base = heap_data->base; |
| 1312 | ret = gen_pool_add(cp_heap->pool, cp_heap->base, |
| 1313 | heap_data->size, -1); |
| 1314 | if (ret < 0) |
| 1315 | goto destroy_pool; |
| 1316 | |
| 1317 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1318 | return &cp_heap->heap; |
| 1319 | |
| 1320 | destroy_pool: |
| 1321 | gen_pool_destroy(cp_heap->pool); |
| 1322 | |
| 1323 | free_heap: |
| 1324 | kfree(cp_heap); |
| 1325 | |
| 1326 | return ERR_PTR(-ENOMEM); |
| 1327 | } |
| 1328 | |
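/*
 * Tear down a heap created by ion_cp_heap_create(). CMA-backed heaps
 * never created a genpool (cp_heap->pool is NULL for them), so the
 * pool is destroyed only when present.
 */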
| 1329 | void ion_cp_heap_destroy(struct ion_heap *heap) |
| 1330 | { |
| 1331 | struct ion_cp_heap *cp_heap = |
| 1332 | container_of(heap, struct ion_cp_heap, heap); |
| 1333 | |
| 1334 | if (cp_heap->pool)
| 1335 | gen_pool_destroy(cp_heap->pool);
| 1336 | kfree(cp_heap);
| 1337 | } |
| 1338 | |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1339 | void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base, |
| 1340 | unsigned long *size)
| 1341 | { |
| 1342 | struct ion_cp_heap *cp_heap = |
| 1343 | container_of(heap, struct ion_cp_heap, heap); |
| 1344 | *base = cp_heap->base; |
| 1345 | *size = cp_heap->total_size; |
| 1346 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1347 | |
| 1348 | /* SCM related code for locking down memory for content protection */ |
| 1349 | |
| 1350 | #define SCM_CP_LOCK_CMD_ID 0x1 |
| 1351 | #define SCM_CP_PROTECT 0x1 |
| 1352 | #define SCM_CP_UNPROTECT 0x0 |
| 1353 | |
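/*
 * Command buffer for the v1 SCM lock call: the range [start, end) is
 * protected or unprotected for the given permission_type.
 */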
| 1354 | struct cp_lock_msg { |
| 1355 | unsigned int start; |
| 1356 | unsigned int end; |
| 1357 | unsigned int permission_type; |
| 1358 | unsigned char lock; |
Olav Haugan | 41cf3e3 | 2012-01-16 12:13:24 -0800 | [diff] [blame] | 1359 | } __attribute__ ((__packed__)); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1360 | |
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1361 | static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1362 | unsigned int permission_type) |
| 1363 | { |
| 1364 | struct cp_lock_msg cmd; |
| 1365 | cmd.start = phy_base; |
| 1366 | cmd.end = phy_base + size; |
| 1367 | cmd.permission_type = permission_type; |
| 1368 | cmd.lock = SCM_CP_PROTECT; |
| 1369 | |
| 1370 | return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID, |
| 1371 | &cmd, sizeof(cmd), NULL, 0); |
| 1372 | } |
| 1373 | |
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1374 | static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1375 | unsigned int permission_type) |
| 1376 | { |
| 1377 | struct cp_lock_msg cmd; |
| 1378 | cmd.start = phy_base; |
| 1379 | cmd.end = phy_base + size; |
| 1380 | cmd.permission_type = permission_type; |
| 1381 | cmd.lock = SCM_CP_UNPROTECT; |
| 1382 | |
| 1383 | return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID, |
| 1384 | &cmd, sizeof(cmd), NULL, 0); |
| 1385 | } |
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1386 | |
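/*
 * The v2 SCM interface takes a physically contiguous list of 1MB chunk
 * addresses instead of a single range, so the heap is split into
 * V2_CHUNK_SIZE chunks and the list's physical address is passed down.
 */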
| 1387 | #define V2_CHUNK_SIZE SZ_1M |
| 1388 | |
| 1389 | static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size, |
| 1390 | void *data, int lock) |
| 1391 | { |
| 1392 | enum cp_mem_usage usage = (enum cp_mem_usage) data; |
| 1393 | unsigned long *chunk_list; |
| 1394 | int nchunks; |
| 1395 | int ret; |
| 1396 | int i; |
| 1397 | |
| 1398 | if (usage < 0 || usage >= MAX_USAGE) |
| 1399 | return -EINVAL; |
| 1400 | |
| 1401 | if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) { |
| 1402 | pr_err("%s: heap size is not aligned to %x\n", |
| 1403 | __func__, V2_CHUNK_SIZE); |
| 1404 | return -EINVAL; |
| 1405 | } |
| 1406 | |
| 1407 | nchunks = size / V2_CHUNK_SIZE; |
| 1408 | |
| 1409 | chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks, |
| 1410 | SZ_4K, 0); |
| 1411 | if (!chunk_list) |
| 1412 | return -ENOMEM; |
| 1413 | |
| 1414 | for (i = 0; i < nchunks; i++) |
| 1415 | chunk_list[i] = phy_base + i * V2_CHUNK_SIZE; |
| 1416 | |
| 1417 | ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list), |
| 1418 | nchunks, V2_CHUNK_SIZE, usage, lock); |
| 1419 | |
| 1420 | free_contiguous_memory(chunk_list); |
| 1421 | return ret; |
| 1422 | } |
| 1423 | |
| 1424 | static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, |
| 1425 | unsigned int permission_type, int version, |
| 1426 | void *data) |
| 1427 | { |
| 1428 | switch (version) { |
| 1429 | case ION_CP_V1: |
| 1430 | return ion_cp_protect_mem_v1(phy_base, size, permission_type); |
| 1431 | case ION_CP_V2: |
| 1432 | return ion_cp_change_mem_v2(phy_base, size, data, |
| 1433 | SCM_CP_PROTECT); |
| 1434 | default: |
| 1435 | return -EINVAL; |
| 1436 | } |
| 1437 | } |
| 1438 | |
| 1439 | static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, |
| 1440 | unsigned int permission_type, int version, |
| 1441 | void *data) |
| 1442 | { |
| 1443 | switch (version) { |
| 1444 | case ION_CP_V1: |
| 1445 | return ion_cp_unprotect_mem_v1(phy_base, size, permission_type); |
| 1446 | case ION_CP_V2: |
| 1447 | return ion_cp_change_mem_v2(phy_base, size, data, |
| 1448 | SCM_CP_UNPROTECT); |
| 1449 | default: |
| 1450 | return -EINVAL; |
| 1451 | } |
| 1452 | } |