/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
        struct ion_heap heap;
        unsigned int has_outer_cache;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
        struct page **pages;
        int nrpages;
        unsigned long size;
};
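
/*
 * Illustrative sketch (not part of the driver): one order-2 chunk on
 * the sg list contributes a single 16K sg entry but four consecutive
 * entries in the pages array, e.g.
 *
 *      page = alloc_pages(gfp, 2);
 *      for (j = 0; j < (1 << 2); ++j)
 *              data->pages[i++] = nth_page(page, j);
 *
 * so consumers such as vmap() can treat the buffer uniformly as an
 * array of PAGE_SIZE pages.
 */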

#define MAX_VMAP_RETRIES 10

static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}
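
/*
 * Worked example, assuming 4K pages: orders[] = {8, 4, 0} gives chunk
 * sizes of 1MB (4K << 8), 64K (4K << 4) and 4K (4K << 0), so a buffer
 * is carved from the largest chunks first.
 */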

static struct page_info *alloc_largest_available(unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                gfp_t gfp;

                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                gfp = __GFP_HIGHMEM;

                if (orders[i]) {
                        gfp |= __GFP_COMP | __GFP_NORETRY |
                               __GFP_NO_KSWAPD | __GFP_NOWARN;
                } else {
                        gfp |= GFP_KERNEL;
                }
                page = alloc_pages(gfp, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                if (!info) {
                        __free_pages(page, orders[i]);
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}
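
/*
 * Illustrative walk-through (not part of the driver): an uncached
 * 1MB + 16K request drives the allocation loop in
 * ion_iommu_heap_allocate() as
 *
 *      info = alloc_largest_available(0x104000, 8); // order 8: 1MB chunk
 *      info = alloc_largest_available(0x4000, 8);   // order 0: 4K chunk
 *      info = alloc_largest_available(0x3000, 0);   // order 0: 4K chunk
 *      ...                                          // two more 4K chunks
 *
 * since the remaining 16K is smaller than the next order-4 (64K)
 * chunk size. The result is one order-8 page plus four order-0 pages.
 */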

static int ion_iommu_heap_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long size, unsigned long align,
                                   unsigned long flags)
{
        int ret, i;
        struct list_head pages_list;
        struct page_info *info, *tmp_info;
        struct scatterlist *sg;
        struct ion_iommu_priv_data *data = NULL;

        if (msm_use_iommu()) {
                struct sg_table *table;
                int j;
                void *ptr = NULL;
                unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
                long size_remaining = PAGE_ALIGN(size);
                unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];

                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                INIT_LIST_HEAD(&pages_list);
                while (size_remaining > 0) {
                        info = alloc_largest_available(size_remaining,
                                                       max_order);
                        if (!info) {
                                ret = -ENOMEM;
                                goto err_free_data;
                        }
                        list_add_tail(&info->list, &pages_list);
                        size_remaining -= order_to_size(info->order);
                        max_order = info->order;
                        num_large_pages++;
                }

                data->size = PFN_ALIGN(size);
                data->nrpages = data->size >> PAGE_SHIFT;
                data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
                                      GFP_KERNEL);
                if (!data->pages) {
                        ret = -ENOMEM;
                        goto err_free_data;
                }

                table = buffer->sg_table =
                        kzalloc(sizeof(struct sg_table), GFP_KERNEL);

                if (!table) {
                        ret = -ENOMEM;
                        goto err1;
                }
                ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
                if (ret)
                        goto err2;

                i = 0;
                sg = table->sgl;
                list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                        struct page *page = info->page;

                        sg_set_page(sg, page, order_to_size(info->order), 0);
                        sg_dma_address(sg) = sg_phys(sg);
                        sg = sg_next(sg);
                        for (j = 0; j < (1 << info->order); ++j)
                                data->pages[i++] = nth_page(page, j);
                        list_del(&info->list);
                        kfree(info);
                }

                /*
                 * As an optimization, we omit __GFP_ZERO from
                 * alloc_pages above and manually zero out all of the
                 * pages in one fell swoop here. To safeguard against
                 * insufficient vmalloc space, we only vmap
                 * `npages_to_vmap' at a time, starting with a
                 * conservative estimate of 1/8 of the total number of
                 * vmalloc pages available. Note that the `pages'
                 * array is composed of all 4K pages, irrespective of
                 * the size of the pages on the sg list.
                 */
                npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
                                 >> PAGE_SHIFT;
                total_pages = data->nrpages;
                for (i = 0; i < total_pages; i += npages_to_vmap) {
                        npages_to_vmap = min(npages_to_vmap, total_pages - i);
                        for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
                             ++j) {
                                ptr = vmap(&data->pages[i], npages_to_vmap,
                                           VM_IOREMAP, pgprot_kernel);
                                if (ptr)
                                        break;
                                else
                                        npages_to_vmap >>= 1;
                        }
                        if (!ptr) {
                                pr_err("Couldn't vmap the pages for zeroing\n");
                                ret = -ENOMEM;
                                goto err3;
                        }
                        memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
                        vunmap(ptr);
                }

                if (!ION_IS_CACHED(flags))
                        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                                               DMA_BIDIRECTIONAL);

                buffer->priv_virt = data;
                return 0;

        } else {
                return -ENOMEM;
        }

err3:
        /* The pages now live on the sg list, not pages_list; free them here */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
        sg_free_table(buffer->sg_table);
err2:
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
err1:
        kfree(data->pages);
err_free_data:
        kfree(data);

        list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                if (info->page)
                        __free_pages(info->page, info->order);
                list_del(&info->list);
                kfree(info);
        }
        return ret;
}

static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->sg_table;
        struct ion_iommu_priv_data *data = buffer->priv_virt;

        if (!table)
                return;
        if (!data)
                return;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

        sg_free_table(table);
        kfree(table);
        buffer->sg_table = NULL;
        kfree(data->pages);
        kfree(data);
}
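
/*
 * Note (illustrative): get_order(sg_dma_len(sg)) recovers the order
 * each chunk was allocated with above; e.g. for a 1MB sg entry,
 * get_order(SZ_1M) == 8 with 4K pages, matching the order-8
 * alloc_pages() call that produced it.
 */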

void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        pgprot_t page_prot = PAGE_KERNEL;

        if (!data)
                return NULL;

        if (!ION_IS_CACHED(buffer->flags))
                page_prot = pgprot_writecombine(page_prot);

        buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

        return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!buffer->vaddr)
                return;

        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
}

int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                            struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
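
/*
 * Illustrative sketch (hypothetical values): mmap'ing with
 * vm_pgoff == 1 into a buffer whose first sg entry is 64K skips the
 * first 4K of that entry and maps the remaining 60K before moving on:
 *
 *      offset = 1 * PAGE_SIZE;           // 4K into the buffer
 *      page += offset / PAGE_SIZE;       // advance one 4K page
 *      len = sg_dma_len(sg) - offset;    // 60K left in this entry
 */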

int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
                             struct ion_iommu_map *data,
                             unsigned int domain_num,
                             unsigned int partition_num,
                             unsigned long align,
                             unsigned long iova_length,
                             unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        int prot = IOMMU_WRITE | IOMMU_READ;

        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        BUG_ON(!msm_use_iommu());

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        /*
         * Use the biggest alignment to allow bigger IOMMU mappings.
         * Use the first entry since the first entry will always be the
         * biggest entry. To take advantage of bigger mapping sizes both the
         * VA and PA addresses have to be aligned to the biggest size.
         */
        if (buffer->sg_table->sgl->length > align)
                align = buffer->sg_table->sgl->length;
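
        /*
         * Worked example (illustrative): if the first sg entry is a 1MB
         * chunk, align is raised to 1MB, so msm_allocate_iova_address()
         * below returns a 1MB-aligned IOVA. With both the IOVA and the
         * physical address 1MB-aligned, the IOMMU driver can use a single
         * 1MB mapping instead of 256 4K mappings.
         */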

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr,
                              buffer->sg_table->sgl,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                       __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);

                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              buffer->size);
out:
        return ret;
}

void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        BUG_ON(!msm_use_iommu());

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
}

static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                               void *vaddr, unsigned int offset,
                               unsigned int length, unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                else
                        dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                else
                        dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                if (!vaddr) {
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                } else {
                        dmac_flush_range(vaddr, vaddr + length);
                }
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (iommu_heap->has_outer_cache) {
                unsigned long pstart;
                unsigned int i;
                struct ion_iommu_priv_data *data = buffer->priv_virt;

                if (!data)
                        return -ENOMEM;

                for (i = 0; i < data->nrpages; ++i) {
                        pstart = page_to_phys(data->pages[i]);
                        outer_cache_op(pstart, pstart + PAGE_SIZE);
                }
        }
        return 0;
}
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 468 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 469 | static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap, |
Olav Haugan | ab804b8 | 2012-03-05 14:41:16 -0800 | [diff] [blame] | 470 | struct ion_buffer *buffer) |
| 471 | { |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 472 | return buffer->sg_table; |
Olav Haugan | ab804b8 | 2012-03-05 14:41:16 -0800 | [diff] [blame] | 473 | } |
| 474 | |
| 475 | static void ion_iommu_heap_unmap_dma(struct ion_heap *heap, |
| 476 | struct ion_buffer *buffer) |
| 477 | { |
Olav Haugan | ab804b8 | 2012-03-05 14:41:16 -0800 | [diff] [blame] | 478 | } |
| 479 | |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 480 | static struct ion_heap_ops iommu_heap_ops = { |
| 481 | .allocate = ion_iommu_heap_allocate, |
| 482 | .free = ion_iommu_heap_free, |
| 483 | .map_user = ion_iommu_heap_map_user, |
| 484 | .map_kernel = ion_iommu_heap_map_kernel, |
| 485 | .unmap_kernel = ion_iommu_heap_unmap_kernel, |
| 486 | .map_iommu = ion_iommu_heap_map_iommu, |
| 487 | .unmap_iommu = ion_iommu_heap_unmap_iommu, |
Olav Haugan | ef01071 | 2012-03-05 14:19:46 -0800 | [diff] [blame] | 488 | .cache_op = ion_iommu_cache_ops, |
Olav Haugan | ab804b8 | 2012-03-05 14:41:16 -0800 | [diff] [blame] | 489 | .map_dma = ion_iommu_heap_map_dma, |
| 490 | .unmap_dma = ion_iommu_heap_unmap_dma, |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 491 | }; |

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_iommu_heap *iommu_heap;

        iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
        if (!iommu_heap)
                return ERR_PTR(-ENOMEM);

        iommu_heap->heap.ops = &iommu_heap_ops;
        iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
        iommu_heap->has_outer_cache = heap_data->has_outer_cache;

        return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        kfree(iommu_heap);
}
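
/*
 * Illustrative usage sketch (hypothetical platform data; not part of
 * this file):
 *
 *      static struct ion_platform_heap pdata = {
 *              .type = ION_HEAP_TYPE_IOMMU,
 *              .name = "iommu",
 *              .has_outer_cache = 1,
 *      };
 *      struct ion_heap *heap = ion_iommu_heap_create(&pdata);
 *
 *      if (!IS_ERR(heap))
 *              ion_iommu_heap_destroy(heap);
 */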