/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"
/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @request_region:	function pointer to call when first mapping of memory
 *			occurs.
 * @release_region:	function pointer to call when last mapping of memory
 *			unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped
 *			in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @iommu_iova:	saved iova when mapping full heap at once.
 * @iommu_partition:	partition used to map full heap.
 * @reusable:	indicates if the memory should be reused via fmem.
 * @reserved_vrange:	reserved virtual address range for use with fmem.
 * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
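 * @protect_cnt:	reference count of outstanding protect requests; the
 *			heap is only unprotected when this drops to zero.
 * @cpu_addr:	kernel address of the CMA backing, set on first allocation.
 * @heap_size:	size of the CMA backing to allocate.
 * @handle:	bus address of the CMA backing from dma_alloc_attrs().
 * @cma:	set to 1 if the heap memory is allocated from a CMA region.
 * @allow_non_secure_allocation: set to 1 to permit non-secure,
 *			non-force-contiguous allocations from this heap.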
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

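/*
 * Lazily allocate the CMA backing for the heap and seed a genpool with it.
 * dma_alloc_attrs() is retried a few times with a short sleep between
 * attempts since contiguous allocation can fail transiently. Returns 0 on
 * success or ION_CP_ALLOCATE_FAIL if no backing could be obtained.
 */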
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

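/* Release the CMA backing and tear down the genpool built on top of it. */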
static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

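/*
 * Called when the heap goes from empty to non-empty: move a reusable heap
 * into FMEM C-state, or allocate the CMA backing. Returns 0 on success and
 * 1 on failure.
 */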
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

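/*
 * Called when the heap becomes empty again: return a reusable heap to FMEM
 * T-state, or free the CMA backing.
 */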
static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects the heap's memory if it is not already protected. Also ensures
 * that we are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				/* report failure rather than success with
				 * an unprotected heap */
				ret_value = -ENOMEM;
				atomic_dec(&cp_heap->protect_cnt);
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects the heap's memory if it is currently protected. Also ensures
 * that we are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

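/*
 * Carve @size bytes out of the heap's genpool. Non-secure requests are
 * rejected while the heap is protected (and at all times unless the heap
 * opts in via allow_non_secure_allocation). On the first allocation the
 * backing store is set up through ion_on_first_alloc(); on failure the
 * byte accounting is rolled back.
 */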
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				unsigned long size,
				unsigned long align,
				unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;
	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * If this is the first allocation, set up the backing store:
	 * transition a reusable (fmem) heap or allocate the CMA region.
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

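/*
 * Tear down a whole-heap IOMMU mapping in 64K steps, including the extra
 * "2x" overmap range used by the video domain.
 */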
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

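/*
 * Return a range to the genpool and, once the heap is empty, release the
 * backing store and any whole-heap IOMMU mappings created lazily by
 * ion_cp_heap_map_iommu().
 */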
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

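/* Report the physical address and size backing @buffer. */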
static int ion_cp_heap_phys(struct ion_heap *heap,
			struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

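/*
 * Heap op wrapper around ion_cp_allocate(): stores the physical address and
 * per-buffer security state in a private ion_cp_buffer hung off priv_virt.
 */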
static int ion_cp_heap_allocate(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long size, unsigned long align,
				unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		/* don't leak the bookkeeping struct on allocation failure */
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

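/*
 * Build an sg_table for the physically contiguous buffer. The chunk size is
 * PAGE_SIZE for cached buffers, SZ_1M for secure buffers whose size is a
 * megabyte multiple, and the whole buffer otherwise.
 */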
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct sg_table *table;
	int ret, i, n_chunks;
	struct scatterlist *sg;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(buffer->size, chunk_size);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = buf->buffer + i * chunk_size;
		sg->length = chunk_size;
		sg->offset = 0;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

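/*
 * Map a buffer through the reserved fmem virtual range: the buffer's offset
 * from the heap's physical base is applied to the reserved virtual base and
 * the pages are remapped with device (optionally cached) attributes.
 */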
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

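/*
 * Kernel mapping policy: cached mappings are only allowed while the heap is
 * unprotected; uncached mappings are always allowed. Reusable heaps map
 * through the fmem range, CMA heaps vmap() their pages, and everything else
 * goes through ioremap().
 */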
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

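/*
 * Userspace mappings are refused for secure buffers and while the heap is
 * protected; otherwise the buffer is remapped into the vma, write-combined
 * unless the buffer was allocated cached.
 */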
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

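/*
 * Clean/invalidate/flush the buffer's caches. Without a kernel vaddr the
 * physical range is walked through temporary ioremap() windows (at most
 * 1/8th of the vmalloc space each, halved on remap failure); the outer
 * cache is then maintained by physical address if present.
 */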
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base+size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr-1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					"FREE", &last_end, &da,
					data->addr-last_end,
					data->addr-last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end+1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end-1, end-last_end, end-last_end);
		}
	}

	return 0;
}

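/*
 * Secure the heap via ion_cp_protect(). Refused while any userspace or
 * cached kernel mappings exist, since those would bypass protection.
 */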
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

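/*
 * Map the entire heap into an IOMMU domain with 64K pages. The heap base
 * and size must be 64K-aligned. The video domain is over-mapped to twice
 * the heap size to account for hardware prefetch past the end of a buffer.
 */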
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->base,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}

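/*
 * Map a buffer into a client's IOMMU domain. If the whole heap is already
 * mapped (or iommu_map_all is set), the buffer's iova is derived from its
 * offset into the heap; otherwise an iova range is allocated and the
 * buffer's sg_table is mapped, with any extra length beyond the buffer
 * handled separately.
 */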
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 992 | static int ion_cp_heap_map_iommu(struct ion_buffer *buffer, |
| 993 | struct ion_iommu_map *data, |
| 994 | unsigned int domain_num, |
| 995 | unsigned int partition_num, |
| 996 | unsigned long align, |
| 997 | unsigned long iova_length, |
| 998 | unsigned long flags) |
| 999 | { |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1000 | struct iommu_domain *domain; |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1001 | int ret = 0; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1002 | unsigned long extra; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1003 | struct ion_cp_heap *cp_heap = |
| 1004 | container_of(buffer->heap, struct ion_cp_heap, heap); |
Olav Haugan | f310cf2 | 2012-05-08 08:42:49 -0700 | [diff] [blame] | 1005 | int prot = IOMMU_WRITE | IOMMU_READ; |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1006 | struct ion_cp_buffer *buf = buffer->priv_virt; |
Olav Haugan | f310cf2 | 2012-05-08 08:42:49 -0700 | [diff] [blame] | 1007 | prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1008 | |
| 1009 | data->mapped_size = iova_length; |
| 1010 | |
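| | /* No IOMMU: the device addresses memory directly, so the buffer's |
| | * physical address doubles as its device address. */ |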
| 1011 | if (!msm_use_iommu()) { |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1012 | data->iova_addr = buf->buffer; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1013 | return 0; |
| 1014 | } |
| 1015 | |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1016 | if (cp_heap->iommu_iova[domain_num]) { |
| 1017 | /* Already mapped. */ |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1018 | unsigned long offset = buf->buffer - cp_heap->base; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1019 | data->iova_addr = cp_heap->iommu_iova[domain_num] + offset; |
| 1020 | return 0; |
| 1021 | } else if (cp_heap->iommu_map_all) { |
Olav Haugan | 3450cae | 2012-05-14 11:36:38 -0700 | [diff] [blame] | 1022 | ret = iommu_map_all(domain_num, cp_heap, partition_num, prot); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1023 | if (!ret) { |
| 1024 | unsigned long offset = |
Laura Abbott | 60ae937 | 2012-10-10 16:28:59 -0700 | [diff] [blame] | 1025 | buf->buffer - cp_heap->base; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1026 | data->iova_addr = |
| 1027 | cp_heap->iommu_iova[domain_num] + offset; |
| 1028 | cp_heap->iommu_partition[domain_num] = partition_num; |
| 1029 | /* |
| 1030 | * Clear the delayed-unmap flag so that we don't interfere |
| 1031 | * with that feature (we are already delaying). |
| 1032 | */ |
| 1033 | data->flags &= ~ION_IOMMU_UNMAP_DELAYED; |
| 1034 | return 0; |
| 1035 | } else { |
| 1036 | cp_heap->iommu_iova[domain_num] = 0; |
| 1037 | cp_heap->iommu_partition[domain_num] = 0; |
| 1038 | return ret; |
| 1039 | } |
| 1040 | } |
| 1041 | |
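| | /* Per-buffer path: reserve an IOVA window for the buffer plus any |
| | * caller-requested padding, then map the scatterlist into it. */ |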
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1042 | extra = iova_length - buffer->size; |
| 1043 | |
Laura Abbott | d01221b | 2012-05-16 17:52:49 -0700 | [diff] [blame] | 1044 | ret = msm_allocate_iova_address(domain_num, partition_num, |
| 1045 | data->mapped_size, align, |
| 1046 | &data->iova_addr); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1047 | |
Laura Abbott | d01221b | 2012-05-16 17:52:49 -0700 | [diff] [blame] | 1048 | if (ret) |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1049 | goto out; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1050 | |
| 1051 | domain = msm_get_iommu_domain(domain_num); |
| 1052 | |
| 1053 | if (!domain) { |
| 1054 | ret = -ENOMEM; |
| 1055 | goto out1; |
| 1056 | } |
| 1057 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1058 | ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl, |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1059 | buffer->size, prot); |
| 1060 | if (ret) { |
| 1061 | pr_err("%s: could not map %lx in domain %p\n", |
| 1062 | __func__, data->iova_addr, domain); |
| 1063 | goto out1; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1064 | } |
| 1065 | |
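| | /* |
| | * Back the padding beyond the buffer with extra 4K mappings rooted |
| | * at the buffer's first physical page, so the whole requested window |
| | * is populated (msm_iommu_map_extra's exact backing scheme is |
| | * assumed here). |
| | */ |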
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1066 | if (extra) { |
| 1067 | unsigned long extra_iova_addr = data->iova_addr + buffer->size; |
Mitchel Humpherys | af3b522 | 2013-01-15 15:38:52 -0800 | [diff] [blame] | 1068 | unsigned long phys_addr = sg_phys(buffer->sg_table->sgl); |
| 1069 | ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr, |
| 1070 | extra, SZ_4K, prot); |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1071 | if (ret) |
| 1072 | goto out2; |
| 1073 | } |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1074 | return ret; |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1075 | |
| 1076 | out2: |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1077 | iommu_unmap_range(domain, data->iova_addr, buffer->size); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1078 | out1: |
| 1079 | msm_free_iova_address(data->iova_addr, domain_num, partition_num, |
| 1080 | data->mapped_size); |
| 1081 | out: |
| 1082 | return ret; |
| 1083 | } |
| 1084 | |
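| | /* |
| | * Tear down a single buffer's mapping. Buffers living inside a |
| | * whole-heap ("map all") mapping are skipped; that mapping stays |
| | * until the heap itself drains. |
| | */ |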
| 1085 | static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data) |
| 1086 | { |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1087 | unsigned int domain_num; |
| 1088 | unsigned int partition_num; |
| 1089 | struct iommu_domain *domain; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1090 | struct ion_cp_heap *cp_heap = |
| 1091 | container_of(data->buffer->heap, struct ion_cp_heap, heap); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1092 | |
| 1093 | if (!msm_use_iommu()) |
| 1094 | return; |
| 1095 | |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1097 | domain_num = iommu_map_domain(data); |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1098 | |
| 1099 | /* If we are mapping everything we'll wait to unmap until everything |
| 1100 | * is freed. */ |
| 1101 | if (cp_heap->iommu_iova[domain_num]) |
| 1102 | return; |
| 1103 | |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1104 | partition_num = iommu_map_partition(data); |
| 1105 | |
| 1106 | domain = msm_get_iommu_domain(domain_num); |
| 1107 | |
| 1108 | if (!domain) { |
| 1109 | WARN(1, "Could not get domain %d. Corruption?\n", domain_num); |
| 1110 | return; |
| 1111 | } |
| 1112 | |
Olav Haugan | 16cdb41 | 2012-03-27 13:02:17 -0700 | [diff] [blame] | 1113 | iommu_unmap_range(domain, data->iova_addr, data->mapped_size); |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1114 | msm_free_iova_address(data->iova_addr, domain_num, partition_num, |
| 1115 | data->mapped_size); |
| 1118 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1119 | |
| 1120 | static struct ion_heap_ops cp_heap_ops = { |
| 1121 | .allocate = ion_cp_heap_allocate, |
| 1122 | .free = ion_cp_heap_free, |
| 1123 | .phys = ion_cp_heap_phys, |
| 1124 | .map_user = ion_cp_heap_map_user, |
| 1125 | .unmap_user = ion_cp_heap_unmap_user, |
| 1126 | .map_kernel = ion_cp_heap_map_kernel, |
| 1127 | .unmap_kernel = ion_cp_heap_unmap_kernel, |
| 1128 | .map_dma = ion_cp_heap_map_dma, |
| 1129 | .unmap_dma = ion_cp_heap_unmap_dma, |
| 1130 | .cache_op = ion_cp_cache_ops, |
Olav Haugan | 3d4fe1a | 2012-01-13 11:42:15 -0800 | [diff] [blame] | 1131 | .print_debug = ion_cp_print_debug, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1132 | .secure_heap = ion_cp_secure_heap, |
| 1133 | .unsecure_heap = ion_cp_unsecure_heap, |
Olav Haugan | 1c94f7b | 2012-02-08 09:45:53 -0800 | [diff] [blame] | 1134 | .map_iommu = ion_cp_heap_map_iommu, |
| 1135 | .unmap_iommu = ion_cp_heap_unmap_iommu, |
Laura Abbott | 9361930 | 2012-10-11 11:51:40 -0700 | [diff] [blame] | 1136 | .secure_buffer = ion_cp_secure_buffer, |
| 1137 | .unsecure_buffer = ion_cp_unsecure_buffer, |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1138 | }; |
| 1139 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1140 | struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) |
| 1141 | { |
| 1142 | struct ion_cp_heap *cp_heap; |
| 1143 | int ret; |
| 1144 | |
| 1145 | cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL); |
| 1146 | if (!cp_heap) |
| 1147 | return ERR_PTR(-ENOMEM); |
| 1148 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1149 | mutex_init(&cp_heap->lock); |
| 1150 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1152 | cp_heap->allocated_bytes = 0; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1153 | cp_heap->umap_count = 0; |
Olav Haugan | 2a5404b | 2012-02-01 17:51:30 -0800 | [diff] [blame] | 1154 | cp_heap->kmap_cached_count = 0; |
| 1155 | cp_heap->kmap_uncached_count = 0; |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1156 | cp_heap->total_size = heap_data->size; |
| 1157 | cp_heap->heap.ops = &cp_heap_ops; |
Mitchel Humpherys | 362b52b | 2012-09-13 10:53:22 -0700 | [diff] [blame] | 1158 | cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP; |
Olav Haugan | ea66e7a | 2012-01-23 17:30:27 -0800 | [diff] [blame] | 1159 | cp_heap->heap_protected = HEAP_NOT_PROTECTED; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1160 | cp_heap->secure_base = heap_data->base; |
Olav Haugan | 42ebe71 | 2012-01-10 16:30:58 -0800 | [diff] [blame] | 1161 | cp_heap->secure_size = heap_data->size; |
Olav Haugan | 85c9540 | 2012-05-30 17:32:37 -0700 | [diff] [blame] | 1162 | cp_heap->has_outer_cache = heap_data->has_outer_cache; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1163 | cp_heap->heap_size = heap_data->size; |
| 1164 | |
Laura Abbott | f68983e | 2012-06-13 16:23:23 -0700 | [diff] [blame] | 1165 | atomic_set(&cp_heap->protect_cnt, 0); |
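| | /* |
| | * The defaults above treat the whole heap as the securable region; |
| | * the optional platform data below may narrow or relocate it and |
| | * wire up bus/region callbacks. |
| | */ |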
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1166 | if (heap_data->extra_data) { |
| 1167 | struct ion_cp_heap_pdata *extra_data = |
| 1168 | heap_data->extra_data; |
Laura Abbott | caafeea | 2011-12-13 11:43:10 -0800 | [diff] [blame] | 1169 | cp_heap->reusable = extra_data->reusable; |
| 1170 | cp_heap->reserved_vrange = extra_data->virt_addr; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1171 | cp_heap->permission_type = extra_data->permission_type; |
Olav Haugan | 42ebe71 | 2012-01-10 16:30:58 -0800 | [diff] [blame] | 1172 | if (extra_data->secure_size) { |
| 1173 | cp_heap->secure_base = extra_data->secure_base; |
| 1174 | cp_heap->secure_size = extra_data->secure_size; |
| 1175 | } |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1176 | if (extra_data->setup_region) |
| 1177 | cp_heap->bus_id = extra_data->setup_region(); |
| 1178 | if (extra_data->request_region) |
Laura Abbott | aedbe42 | 2012-08-03 17:06:22 -0700 | [diff] [blame] | 1179 | cp_heap->heap_request_region = |
| 1180 | extra_data->request_region; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1181 | if (extra_data->release_region) |
Laura Abbott | aedbe42 | 2012-08-03 17:06:22 -0700 | [diff] [blame] | 1182 | cp_heap->heap_release_region = |
| 1183 | extra_data->release_region; |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1184 | cp_heap->iommu_map_all = |
| 1185 | extra_data->iommu_map_all; |
| 1186 | cp_heap->iommu_2x_map_domain = |
| 1187 | extra_data->iommu_2x_map_domain; |
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1188 | cp_heap->cma = extra_data->is_cma; |
Mitchel Humpherys | 345f023 | 2013-01-11 10:55:25 -0800 | [diff] [blame] | 1189 | cp_heap->allow_non_secure_allocation = |
| 1190 | extra_data->allow_nonsecure_alloc; |
Olav Haugan | 0703dbf | 2011-12-19 17:53:38 -0800 | [diff] [blame] | 1192 | } |
Olav Haugan | 8726caf | 2012-05-10 15:11:35 -0700 | [diff] [blame] | 1193 | |
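| | /* |
| | * CMA-backed heaps get their memory at allocation time, so no pool |
| | * is carved out here. Carveout heaps manage their fixed range with |
| | * a gen_pool at 4K granularity (order 12). |
| | */ |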
Laura Abbott | 3180a5f | 2012-08-03 17:31:03 -0700 | [diff] [blame] | 1194 | if (cp_heap->cma) { |
| 1195 | cp_heap->pool = NULL; |
| 1196 | cp_heap->cpu_addr = 0; |
| 1197 | cp_heap->heap.priv = heap_data->priv; |
| 1198 | } else { |
| 1199 | cp_heap->pool = gen_pool_create(12, -1); |
| 1200 | if (!cp_heap->pool) |
| 1201 | goto free_heap; |
| 1202 | |
| 1203 | cp_heap->base = heap_data->base; |
| 1204 | ret = gen_pool_add(cp_heap->pool, cp_heap->base, |
| 1205 | heap_data->size, -1); |
| 1206 | if (ret < 0) |
| 1207 | goto destroy_pool; |
| 1208 | |
| 1209 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1210 | return &cp_heap->heap; |
| 1211 | |
| 1212 | destroy_pool: |
| 1213 | gen_pool_destroy(cp_heap->pool); |
| 1214 | |
| 1215 | free_heap: |
| 1216 | kfree(cp_heap); |
| 1217 | |
| 1218 | return ERR_PTR(-ENOMEM); |
| 1219 | } |
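| | /* |
| | * A minimal, hypothetical sketch of how a board file might feed this |
| | * heap. Field names mirror those consumed in ion_cp_heap_create(); |
| | * the IPT_TYPE_MM_CARVEOUT and VIDEO_DOMAIN constants and the values |
| | * are illustrative assumptions, not taken from this file: |
| | * |
| | *	static struct ion_cp_heap_pdata cp_mm_pdata = { |
| | *		.permission_type	= IPT_TYPE_MM_CARVEOUT, |
| | *		.iommu_map_all		= 1, |
| | *		.iommu_2x_map_domain	= VIDEO_DOMAIN, |
| | *		.allow_nonsecure_alloc	= 1, |
| | *	}; |
| | * |
| | *	static struct ion_platform_heap mm_heap = { |
| | *		.type		= ION_HEAP_TYPE_CP, |
| | *		.extra_data	= &cp_mm_pdata, |
| | *	}; |
| | */ |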
| 1220 | |
| 1221 | void ion_cp_heap_destroy(struct ion_heap *heap) |
| 1222 | { |
| 1223 | struct ion_cp_heap *cp_heap = |
| 1224 | container_of(heap, struct ion_cp_heap, heap); |
| 1225 | |
| 1226 | if (cp_heap->pool) /* CMA heaps never created a pool */ |
| | gen_pool_destroy(cp_heap->pool); |
| 1227 | kfree(cp_heap); |
| 1229 | } |
| 1230 | |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1231 | void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base, |
| 1232 | unsigned long *size) |
| 1233 | { |
| 1234 | struct ion_cp_heap *cp_heap = |
| 1235 | container_of(heap, struct ion_cp_heap, heap); |
| 1236 | *base = cp_heap->base; |
| 1237 | *size = cp_heap->total_size; |
| 1238 | } |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1239 | |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1240 | |