/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

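/*
 * Book-keeping for one carveout heap: a gen_pool allocator that hands out
 * physically contiguous ranges from [base, base + total_size), plus simple
 * usage accounting and a flag indicating whether outer-cache maintenance
 * is required on this target.
 */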
struct ion_carveout_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
        unsigned long allocated_bytes;
        unsigned long total_size;
        unsigned int has_outer_cache;
};

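/*
 * Carve a physically contiguous region of @size bytes out of the heap's
 * gen_pool, aligned to @align. Returns the physical address of the region,
 * or ION_CARVEOUT_ALLOCATE_FAIL if the pool cannot satisfy the request.
 */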
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long align)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
                                                        size, ilog2(align));

        if (!offset) {
                if ((carveout_heap->total_size -
                     carveout_heap->allocated_bytes) >= size)
                        pr_debug("%s: heap %s has enough memory (%lx) but"
                                " the allocation of size %lx still failed."
                                " Memory is probably fragmented.\n",
                                __func__, heap->name,
                                carveout_heap->total_size -
                                carveout_heap->allocated_bytes, size);
                return ION_CARVEOUT_ALLOCATE_FAIL;
        }

        carveout_heap->allocated_bytes += size;
        return offset;
}

void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;
        gen_pool_free(carveout_heap->pool, addr, size);
        carveout_heap->allocated_bytes -= size;
}

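/*
 * Carveout buffers are physically contiguous, so reporting the physical
 * address is simply a matter of returning the base address and size that
 * were recorded at allocation time.
 */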
static int ion_carveout_heap_phys(struct ion_heap *heap,
                                  struct ion_buffer *buffer,
                                  ion_phys_addr_t *addr, size_t *len)
{
        *addr = buffer->priv_phys;
        *len = buffer->size;
        return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        buffer->priv_phys = ion_carveout_allocate(heap, size, align);
        return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        ion_carveout_free(heap, buffer->priv_phys, buffer->size);
        buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

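/*
 * Build an sg_table describing the contiguous carveout region. Cached
 * buffers are described in PAGE_SIZE chunks (one entry per page); uncached
 * buffers get a single entry covering the whole buffer.
 */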
struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
                                           struct ion_buffer *buffer)
{
        size_t chunk_size = buffer->size;

        if (ION_IS_CACHED(buffer->flags))
                chunk_size = PAGE_SIZE;

        return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
                                           buffer->size);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
}

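/*
 * Map the carveout region into the kernel's address space with ioremap(),
 * honouring the buffer's cache flag. The mapping is torn down again with
 * __arm_iounmap() in the unmap path below.
 */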
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        void *ret_value;

        if (ION_IS_CACHED(buffer->flags))
                ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
        else
                ret_value = ioremap(buffer->priv_phys, buffer->size);

        return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
        __arm_iounmap(buffer->vaddr);
        buffer->vaddr = NULL;

        return;
}

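/*
 * Map the buffer into a user address space. Uncached buffers are mapped
 * write-combined; cached buffers keep the protection bits the VMA already
 * carries.
 */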
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                               struct vm_area_struct *vma)
{
        int ret_value = 0;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        ret_value = remap_pfn_range(vma, vma->vm_start,
                        __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                        vma->vm_end - vma->vm_start,
                        vma->vm_page_prot);

        return ret_value;
}

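/*
 * Clean and/or invalidate the caches for a buffer. When the caller supplies
 * a kernel mapping, the operation is performed directly on @vaddr; otherwise
 * the buffer is ioremapped a piece at a time (halving the piece size when
 * ioremap fails) and each piece is maintained in turn. If the SoC has an
 * outer cache, the matching outer_*_range operation is then applied to the
 * physical range.
 */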
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned int size_to_vmap, total_size;
        int i, j;
        void *ptr = NULL;
        ion_phys_addr_t buff_phys = buffer->priv_phys;

        if (!vaddr) {
                /*
                 * Split the vmalloc space into smaller regions in
                 * order to clean and/or invalidate the cache.
                 */
                size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
                total_size = buffer->size;

                for (i = 0; i < total_size; i += size_to_vmap) {
                        size_to_vmap = min(size_to_vmap, total_size - i);
                        for (j = 0; j < 10 && size_to_vmap; ++j) {
                                ptr = ioremap(buff_phys, size_to_vmap);
                                if (ptr) {
                                        switch (cmd) {
                                        case ION_IOC_CLEAN_CACHES:
                                                dmac_clean_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_clean_range;
                                                break;
                                        case ION_IOC_INV_CACHES:
                                                dmac_inv_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_inv_range;
                                                break;
                                        case ION_IOC_CLEAN_INV_CACHES:
                                                dmac_flush_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_flush_range;
                                                break;
                                        default:
                                                return -EINVAL;
                                        }
                                        buff_phys += size_to_vmap;
                                        break;
                                } else {
                                        size_to_vmap >>= 1;
                                }
                        }
                        if (!ptr) {
                                pr_err("Couldn't io-remap the memory\n");
                                return -EINVAL;
                        }
                        iounmap(ptr);
                }
        } else {
                switch (cmd) {
                case ION_IOC_CLEAN_CACHES:
                        dmac_clean_range(vaddr, vaddr + length);
                        outer_cache_op = outer_clean_range;
                        break;
                case ION_IOC_INV_CACHES:
                        dmac_inv_range(vaddr, vaddr + length);
                        outer_cache_op = outer_inv_range;
                        break;
                case ION_IOC_CLEAN_INV_CACHES:
                        dmac_flush_range(vaddr, vaddr + length);
                        outer_cache_op = outer_flush_range;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (carveout_heap->has_outer_cache) {
                unsigned long pstart = buffer->priv_phys + offset;
                outer_cache_op(pstart, pstart + length);
        }
        return 0;
}

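/*
 * Debugfs helper: report how much of the heap is in use and, when a memory
 * map is supplied, print one line per allocation along with the free gaps
 * between them.
 */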
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
                                    const struct rb_root *mem_map)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        seq_printf(s, "total bytes currently allocated: %lx\n",
                carveout_heap->allocated_bytes);
        seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

        if (mem_map) {
                unsigned long base = carveout_heap->base;
                unsigned long size = carveout_heap->total_size;
                unsigned long end = base + size;
                unsigned long last_end = base;
                struct rb_node *n;

                seq_printf(s, "\nMemory Map\n");
                seq_printf(s, "%16s %14s %14s %14s\n",
                           "client", "start address", "end address",
                           "size (hex)");

                for (n = rb_first(mem_map); n; n = rb_next(n)) {
                        struct mem_map_data *data =
                                        rb_entry(n, struct mem_map_data, node);
                        const char *client_name = "(null)";

                        if (last_end < data->addr) {
                                phys_addr_t da;

                                da = data->addr - 1;
                                seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
                                           "FREE", &last_end, &da,
                                           data->addr - last_end,
                                           data->addr - last_end);
                        }

                        if (data->client_name)
                                client_name = data->client_name;

                        seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
                                   client_name, &data->addr,
                                   &data->addr_end,
                                   data->size, data->size);
                        last_end = data->addr_end + 1;
                }
                if (last_end < end) {
                        seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
                                last_end, end - 1, end - last_end,
                                end - last_end);
                }
        }
        return 0;
}

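/*
 * Map the (already contiguous) carveout buffer into an IOMMU domain: an IOVA
 * range of @iova_length bytes is reserved in the given domain/partition, a
 * single-entry scatterlist describing the buffer is mapped at that IOVA, and
 * any padding beyond the buffer size (iova_length - buffer->size) is handed
 * to msm_iommu_map_extra(). On targets without an IOMMU the physical address
 * is simply reused as the "IOVA".
 */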
int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
                                struct ion_iommu_map *data,
                                unsigned int domain_num,
                                unsigned int partition_num,
                                unsigned long align,
                                unsigned long iova_length,
                                unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        struct scatterlist *sglist = NULL;
        int prot = IOMMU_WRITE | IOMMU_READ;
        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        data->mapped_size = iova_length;

        if (!msm_use_iommu()) {
                data->iova_addr = buffer->priv_phys;
                return 0;
        }

        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        sglist = vmalloc(sizeof(*sglist));
        if (!sglist) {
                ret = -ENOMEM;
                goto out1;
        }

        sg_init_table(sglist, 1);
        sglist->length = buffer->size;
        sglist->offset = 0;
        sglist->dma_address = buffer->priv_phys;

        ret = iommu_map_range(domain, data->iova_addr, sglist,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                unsigned long phys_addr = sg_phys(sglist);
                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        vfree(sglist);
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        vfree(sglist);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                                data->mapped_size);

out:

        return ret;
}

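/*
 * Undo ion_carveout_heap_map_iommu(): unmap the IOVA range and return it to
 * the partition's IOVA allocator.
 */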
void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        if (!msm_use_iommu())
                return;

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                                data->mapped_size);

        return;
}

static struct ion_heap_ops carveout_heap_ops = {
        .allocate = ion_carveout_heap_allocate,
        .free = ion_carveout_heap_free,
        .phys = ion_carveout_heap_phys,
        .map_user = ion_carveout_heap_map_user,
        .map_kernel = ion_carveout_heap_map_kernel,
        .unmap_kernel = ion_carveout_heap_unmap_kernel,
        .map_dma = ion_carveout_heap_map_dma,
        .unmap_dma = ion_carveout_heap_unmap_dma,
        .cache_op = ion_carveout_cache_ops,
        .print_debug = ion_carveout_print_debug,
        .map_iommu = ion_carveout_heap_map_iommu,
        .unmap_iommu = ion_carveout_heap_unmap_iommu,
};

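/*
 * Create a carveout heap over the [base, base + size) range described by the
 * platform data. The backing gen_pool uses a minimum allocation order of 12,
 * i.e. allocations are handed out in 4 KiB granules.
 */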
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_carveout_heap *carveout_heap;
        int ret;

        carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);

        carveout_heap->pool = gen_pool_create(12, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }
        carveout_heap->base = heap_data->base;
        ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
                        heap_data->size, -1);
        if (ret < 0) {
                gen_pool_destroy(carveout_heap->pool);
                kfree(carveout_heap);
                return ERR_PTR(-EINVAL);
        }
        carveout_heap->heap.ops = &carveout_heap_ops;
        carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
        carveout_heap->allocated_bytes = 0;
        carveout_heap->total_size = heap_data->size;
        carveout_heap->has_outer_cache = heap_data->has_outer_cache;

        return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
        struct ion_carveout_heap *carveout_heap =
             container_of(heap, struct ion_carveout_heap, heap);

        gen_pool_destroy(carveout_heap->pool);
        kfree(carveout_heap);
        carveout_heap = NULL;
}