/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
#include <trace/events/kmem.h>
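
/*
 * The heap keeps one ion_page_pool per allocation order, in two
 * flavors: pools for cached buffers and pools for uncached ones.
 * Freed pages are parked in the matching pool so later allocations
 * can skip the page allocator.
 */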
struct ion_iommu_heap {
	struct ion_heap heap;
	struct ion_page_pool **cached_pools;
	struct ion_page_pool **uncached_pools;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	unsigned int pages_uses_vmalloc;
	int nrpages;
	unsigned long size;
};

#define MAX_VMAP_RETRIES 10
#define BAD_ORDER -1
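
/*
 * Allocation orders tried from largest to smallest: with 4K pages,
 * orders 9/8/4/0 are 2M/1M/64K/4K chunks. High-order attempts use
 * opportunistic flags (no retry, no kswapd wakeup, no warning) since
 * we can always fall back to a smaller order; only order-0 is allowed
 * to block in normal GFP_KERNEL fashion.
 */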
static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY
				| __GFP_NO_KSWAPD | __GFP_NOWARN |
				__GFP_IO | __GFP_FS | __GFP_ZERO);

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
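
/*
 * Map an allocation order to its index in the pool arrays. An order
 * not in orders[] is a driver bug, hence the BUG(); the BAD_ORDER
 * return keeps the compiler happy and lets callers bail out
 * defensively.
 */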
static int order_to_index(unsigned int order)
{
	int i;
	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return BAD_ORDER;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}
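
/*
 * Hand back the largest chunk that still fits in `size' and does not
 * exceed `max_order'. Pages normally come from the matching page
 * pool; ION_FLAG_POOL_FORCE_ALLOC bypasses the pools and goes to the
 * page allocator directly.
 */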
static struct page_info *alloc_largest_available(struct ion_iommu_heap *heap,
						unsigned long size,
						unsigned int max_order,
						unsigned long flags)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		int idx = order_to_index(orders[i]);
		struct ion_page_pool *pool;

		if (idx == BAD_ORDER)
			continue;

		if (ION_IS_CACHED(flags)) {
			pool = heap->cached_pools[idx];
			BUG_ON(!pool);
		} else {
			pool = heap->uncached_pools[idx];
			BUG_ON(!pool);
		}

		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		if (orders[i])
			gfp = high_gfp_flags;
		else
			gfp = low_gfp_flags;

		trace_alloc_pages_iommu_start(gfp, orders[i]);
		if (flags & ION_FLAG_POOL_FORCE_ALLOC)
			page = alloc_pages(gfp, orders[i]);
		else
			page = ion_page_pool_alloc(pool);
		trace_alloc_pages_iommu_end(gfp, orders[i]);
		if (!page) {
			trace_alloc_pages_iommu_fail(gfp, orders[i]);
			continue;
		}

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			/* Don't leak the chunk if bookkeeping fails. */
			if (flags & ION_FLAG_POOL_FORCE_ALLOC)
				__free_pages(page, orders[i]);
			else
				ion_page_pool_free(pool, page);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}
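
/*
 * Zero a buffer through a temporary writecombine vmap. For cached
 * buffers the CPU caches may still hold stale lines for these pages,
 * so after zeroing through the uncached alias the inner and outer
 * caches are invalidated page by page via a kmap_atomic alias.
 */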
static int ion_iommu_buffer_zero(struct ion_iommu_priv_data *data,
				bool is_cached)
{
	int i, j, k;
	unsigned int npages_to_vmap;
	unsigned int total_pages;
	void *ptr = NULL;
	/*
	 * It's cheaper just to use writecombine memory and skip the
	 * cache vs. using cached memory and trying to flush it afterwards
	 */
	pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);

	/*
	 * As an optimization, we manually zero out all of the pages
	 * in one fell swoop here. To safeguard against insufficient
	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
	 * starting with a conservative estimate of 1/8 of the total
	 * number of vmalloc pages available. Note that the `pages'
	 * array is composed of all 4K pages, irrespective of the size
	 * of the pages on the sg list.
	 */
	npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
	total_pages = data->nrpages;
	for (i = 0; i < total_pages; i += npages_to_vmap) {
		npages_to_vmap = min(npages_to_vmap, total_pages - i);
		for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			++j) {
			ptr = vmap(&data->pages[i], npages_to_vmap,
					VM_IOREMAP, pgprot);
			if (ptr)
				break;
			else
				npages_to_vmap >>= 1;
		}
		if (!ptr)
			return -ENOMEM;

		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
		if (is_cached) {
			/*
			 * Invalidate the cache to pick up the zeroing.
			 */
			for (k = 0; k < npages_to_vmap; k++) {
				void *p = kmap_atomic(data->pages[i + k]);
				phys_addr_t phys = page_to_phys(
							data->pages[i + k]);

				dmac_inv_range(p, p + PAGE_SIZE);
				outer_inv_range(phys, phys + PAGE_SIZE);
				kunmap_atomic(p);
			}
		}
		vunmap(ptr);
	}

	return 0;
}
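
/*
 * Allocate a buffer: grab the largest chunks available, build an
 * sg_table with one scatterlist entry per chunk, and also flatten the
 * chunks into an array of 4K pages for later vmap() use.
 */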
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		unsigned int num_large_pages = 0;
		unsigned long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
		unsigned int page_tbl_size;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		INIT_LIST_HEAD(&pages_list);
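		/*
		 * Grab the largest chunks first. max_order ratchets
		 * down to each successful order, so the list (and the
		 * resulting sg_table) ends up sorted from largest
		 * chunk to smallest. Cached buffers start at order 0
		 * and stay there.
		 */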
		while (size_remaining > 0) {
			info = alloc_largest_available(iommu_heap,
						size_remaining,
						max_order,
						flags);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages_uses_vmalloc = 0;
		page_tbl_size = sizeof(struct page *) * data->nrpages;

		if (page_tbl_size > SZ_8K) {
			/*
			 * Do fallback to ensure we have a balance between
			 * performance and availability.
			 */
			data->pages = kmalloc(page_tbl_size,
					      __GFP_COMP | __GFP_NORETRY |
					      __GFP_NO_KSWAPD | __GFP_NOWARN);
			if (!data->pages) {
				data->pages = vmalloc(page_tbl_size);
				data->pages_uses_vmalloc = 1;
			}
		} else {
			data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
		}
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

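		/*
		 * Pool-backed buffers are already zeroed (fresh pool
		 * pages come with __GFP_ZERO, recycled ones are zeroed
		 * on free). Force-allocated buffers are zeroed here,
		 * and uncached ones are then synced so the zeroing is
		 * visible to devices.
		 */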
		if (flags & ION_FLAG_POOL_FORCE_ALLOC) {
			ret = ion_iommu_buffer_zero(data, ION_IS_CACHED(flags));
			if (ret) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				goto err3;
			}

			if (!ION_IS_CACHED(flags))
				dma_sync_sg_for_device(NULL, table->sgl,
						       table->nents,
						       DMA_BIDIRECTIONAL);
		}
		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	/* The list was consumed above; free the pages via the sg table. */
	{
		struct scatterlist *tsg;
		int ti;

		for_each_sg(buffer->sg_table->sgl, tsg,
			    buffer->sg_table->nents, ti)
			__free_pages(sg_page(tsg), get_order(sg_dma_len(tsg)));
	}
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}
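
/*
 * Free a buffer: pool-backed pages are zeroed (so the next client
 * cannot read stale data) and returned to their pool; force-allocated
 * pages go straight back to the page allocator.
 */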
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	bool cached = ion_buffer_cached(buffer);
	struct ion_iommu_heap *iommu_heap =
		container_of(buffer->heap, struct ion_iommu_heap, heap);

	if (!table)
		return;
	if (!data)
		return;

	if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC))
		ion_iommu_buffer_zero(data, ION_IS_CACHED(buffer->flags));

	for_each_sg(table->sgl, sg, table->nents, i) {
		int order = get_order(sg_dma_len(sg));
		int idx = order_to_index(order);
		struct ion_page_pool *pool;

		if (idx == BAD_ORDER) {
			WARN_ON(1);
			continue;
		}

		if (cached)
			pool = iommu_heap->cached_pools[idx];
		else
			pool = iommu_heap->uncached_pools[idx];

		if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
			__free_pages(sg_page(sg), order);
		else
			ion_page_pool_free(pool, sg_page(sg));
	}

	sg_free_table(table);
	kfree(table);
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
	kfree(data);
}
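
/*
 * Kernel mapping uses the flat 4K-page array so vmap() works no
 * matter what mix of chunk sizes backs the buffer. Uncached buffers
 * get a writecombine mapping to match their device-visible state.
 */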
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}
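
/*
 * Map the buffer into userspace by walking the sg list one chunk at a
 * time, skipping over vm_pgoff and clipping the final chunk to the
 * end of the VMA.
 */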
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
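
/*
 * The sg_table is built at allocation time and lives for the life of
 * the buffer, so map_dma simply hands it out and unmap_dma has
 * nothing to undo.
 */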
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static int ion_iommu_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				     void *unused)
{
	struct ion_iommu_heap *iommu_heap = container_of(heap,
							 struct ion_iommu_heap,
							 heap);
	int i;
	unsigned long total = 0;

	seq_printf(s, "Cached Pools:\n");
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = iommu_heap->cached_pools[i];
		seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);

		total += (1 << pool->order) * PAGE_SIZE *
			(pool->low_count + pool->high_count);
	}

	seq_printf(s, "Uncached Pools:\n");
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = iommu_heap->uncached_pools[i];
		seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);

		total += (1 << pool->order) * PAGE_SIZE *
			(pool->low_count + pool->high_count);
	}
	seq_printf(s, "Total bytes in pool: %lx\n", total);
	return 0;
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};
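
/*
 * Create the heap and its per-order page pools, one set for cached
 * buffers and one for uncached buffers.
 */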
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;
	int i;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->uncached_pools = kzalloc(
				sizeof(struct ion_page_pool *) * num_orders,
				GFP_KERNEL);
	if (!iommu_heap->uncached_pools)
		goto err_alloc_uncached_pools;

	iommu_heap->cached_pools = kzalloc(
				sizeof(struct ion_page_pool *) * num_orders,
				GFP_KERNEL);
	if (!iommu_heap->cached_pools)
		goto err_alloc_cached_pools;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags;

		if (orders[i])
			gfp_flags = high_gfp_flags | __GFP_ZERO;
		else
			gfp_flags = low_gfp_flags | __GFP_ZERO;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_cached_pool;
		iommu_heap->cached_pools[i] = pool;
	}

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags;

		if (orders[i])
			gfp_flags = high_gfp_flags | __GFP_ZERO;
		else
			gfp_flags = low_gfp_flags | __GFP_ZERO;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_uncached_pool;
		iommu_heap->uncached_pools[i] = pool;
	}
	iommu_heap->heap.debug_show = ion_iommu_heap_debug_show;
	return &iommu_heap->heap;

err_create_uncached_pool:
	for (i = 0; i < num_orders; i++)
		if (iommu_heap->uncached_pools[i])
			ion_page_pool_destroy(iommu_heap->uncached_pools[i]);

err_create_cached_pool:
	for (i = 0; i < num_orders; i++)
		if (iommu_heap->cached_pools[i])
			ion_page_pool_destroy(iommu_heap->cached_pools[i]);

	kfree(iommu_heap->cached_pools);
err_alloc_cached_pools:
	kfree(iommu_heap->uncached_pools);
err_alloc_uncached_pools:
	kfree(iommu_heap);
	return ERR_PTR(-ENOMEM);
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);
	int i;

	/* Drain and destroy the per-order pools before freeing the heap. */
	for (i = 0; i < num_orders; i++) {
		ion_page_pool_destroy(iommu_heap->cached_pools[i]);
		ion_page_pool_destroy(iommu_heap->uncached_pools[i]);
	}
	kfree(iommu_heap->cached_pools);
	kfree(iommu_heap->uncached_pools);
	kfree(iommu_heap);
}