/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

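/*
 * Heap wrapper for IOMMU-backed ion allocations. has_outer_cache is
 * copied from the platform heap data and gates the outer (e.g. L2)
 * cache maintenance in ion_iommu_cache_ops() below.
 */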
struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

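/*
 * Per-buffer bookkeeping: the backing pages are allocated one at a
 * time, so they are only virtually contiguous once mapped. size is
 * the page-aligned request; nrpages is the length of the pages array.
 */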
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

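/*
 * Allocate the buffer as individually allocated, zeroed pages and
 * build an sg_table (one page per entry) describing them. Physical
 * contiguity is not required because the buffer is always reached
 * through an IOMMU mapping.
 */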
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
		if (ret)
			goto err2;

		for_each_sg(table->sgl, sg, table->nents, i) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i]) {
				ret = -ENOMEM;
				goto err3;
			}

			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
		}

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;

	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

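/*
 * Free path for the allocation above: drop every backing page, then
 * the page array and the private data. The sg_table itself is freed
 * in ion_iommu_heap_unmap_dma().
 */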
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	kfree(data->pages);
	kfree(data);
}

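/*
 * Map the discontiguous pages into one contiguous kernel virtual
 * range with vmap(). Buffers not allocated as cached get an uncached
 * pgprot so the CPU view stays coherent with the device.
 */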
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

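/*
 * Back a userspace mmap() by inserting the pages into the vma one at
 * a time; uncached buffers are mapped write-combined. A failed
 * vm_insert_page() fails the whole mmap, which tears the vma down.
 */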
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

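/*
 * Map the buffer into an IOMMU domain: carve an iova range out of the
 * requested partition, map the scatterlist into it, and, when the
 * requested iova_length exceeds the buffer size, pad the remainder of
 * the range with SZ_4K mappings via msm_iommu_map_extra().
 */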
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      buffer->size);
out:
	return ret;
}

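/*
 * Tear down a mapping set up above: unmap the full mapped range and
 * return the iova region to its domain/partition allocator.
 */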
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

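/*
 * CPU cache maintenance for a buffer: clean, invalidate, or flush the
 * inner cache over the kernel mapping, then, on targets with an outer
 * cache, repeat the operation page by page on physical addresses,
 * since the backing pages are not physically contiguous.
 */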
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;

		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

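/*
 * The sg_table is built at allocation time, so map_dma simply hands
 * it out; unmap_dma is where it is actually freed.
 */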
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

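/* Heap operations exported to the ion core. */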
static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

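/*
 * Instantiate the heap from platform data; the ion core is expected
 * to call this at heap creation and ion_iommu_heap_destroy() below on
 * teardown.
 */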
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
}