/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

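/*
 * Map a buffer into the kernel virtual address space.  A page array is
 * built from the buffer's scatterlist and handed to vmap(); the mapping
 * is cached or write-combined depending on ION_FLAG_CACHED.  Returns
 * the kernel virtual address, or ERR_PTR(-ENOMEM) on failure.
 */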
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

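/* Undo ion_heap_map_kernel() by unmapping the buffer's vmap() area. */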
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

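/*
 * Map a buffer into a userspace vma.  The scatterlist is walked,
 * skipping whole entries until the vma's page offset is consumed, and
 * each remaining chunk is inserted with remap_pfn_range() until the
 * vma is fully populated.
 */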
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

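/*
 * Zero a batch of pages through a transient vm_map_ram() mapping, so
 * buffers of any size can be cleared without needing a large,
 * long-lived kernel mapping.
 */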
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

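/*
 * Zero every page backing a scatterlist, gathering the pages into
 * fixed-size batches for ion_heap_clear_pages().
 */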
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

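/*
 * Zero all memory backing a buffer.  The zeroing mapping uses the same
 * cacheability as the buffer's own mappings, so the cleared contents
 * are what subsequent users of the pages actually observe.
 */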
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

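/*
 * Zero a single physically contiguous range of pages by wrapping it in
 * a one-entry scatterlist.
 */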
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

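/*
 * Queue a buffer for deferred freeing and wake the heap's free thread,
 * which destroys it in the background.
 */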
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

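/* Total bytes currently queued on the heap's deferred-free list. */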
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

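/*
 * Drain up to @size bytes of buffers from the free list (0 means drain
 * everything), destroying each buffer with the lock dropped.  With
 * @skip_pools set, buffers are marked ION_PRIV_FLAG_SHRINKER_FREE so
 * the heap returns their pages to the system rather than refilling its
 * page pools; this is the path taken under memory pressure.
 */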
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

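/* Drain the free list, allowing freed pages to refill any page pools. */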
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

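/* Drain the free list, bypassing any page pools (shrinker path). */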
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

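/*
 * Per-heap free thread: sleeps (freezably, so it does not block
 * suspend) until buffers appear on the free list, then destroys them
 * one at a time without holding the lock across ion_buffer_destroy().
 */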
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

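/*
 * Set up the free list and spawn the heap's free thread.  The thread
 * runs at SCHED_IDLE priority so deferred frees consume only
 * otherwise-idle CPU time.
 */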
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

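/*
 * Shrinker count callback: report how many pages this heap could
 * release, i.e. everything on its free list plus whatever its shrink
 * op reports when asked to scan nothing (nr_to_scan == 0 is a query).
 */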
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
	return total;
}

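/*
 * Shrinker scan callback: release up to sc->nr_to_scan pages, taking
 * them from the deferred-free list first and only then from the heap's
 * own shrink op.
 */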
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * Shrink the free list first; there is no point in zeroing memory
	 * that is about to be reclaimed anyway.  Also skip any page pools,
	 * so the freed pages go back to the system instead of being
	 * recycled by the heap.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
	return freed;
}

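/*
 * Register the heap with the VM shrinker so its free list and page
 * pools can be reclaimed under memory pressure.
 */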
void ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;
	register_shrinker(&heap->shrinker);
}

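/*
 * Instantiate a heap from platform data by dispatching to the
 * type-specific constructor, then fill in the generic name and id.
 */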
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}
EXPORT_SYMBOL(ion_heap_create);

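/* Counterpart to ion_heap_create(): dispatch to the matching destructor. */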
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
EXPORT_SYMBOL(ion_heap_destroy);