Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1 | /* |
| 2 | * drivers/staging/android/ion/ion_heap.c |
| 3 | * |
| 4 | * Copyright (C) 2011 Google, Inc. |
| 5 | * |
| 6 | * This software is licensed under the terms of the GNU General Public |
| 7 | * License version 2, as published by the Free Software Foundation, and |
| 8 | * may be copied, distributed, and modified under those terms. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | */ |
| 16 | |
| 17 | #include <linux/err.h> |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 18 | #include <linux/freezer.h> |
| 19 | #include <linux/kthread.h> |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 20 | #include <linux/mm.h> |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 21 | #include <linux/rtmutex.h> |
| 22 | #include <linux/sched.h> |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 23 | #include <linux/scatterlist.h> |
| 24 | #include <linux/vmalloc.h> |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 25 | #include "ion.h" |
| 26 | #include "ion_priv.h" |
| 27 | |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 28 | void *ion_heap_map_kernel(struct ion_heap *heap, |
| 29 | struct ion_buffer *buffer) |
| 30 | { |
| 31 | struct scatterlist *sg; |
| 32 | int i, j; |
| 33 | void *vaddr; |
| 34 | pgprot_t pgprot; |
| 35 | struct sg_table *table = buffer->sg_table; |
| 36 | int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; |
| 37 | struct page **pages = vmalloc(sizeof(struct page *) * npages); |
| 38 | struct page **tmp = pages; |
| 39 | |
| 40 | if (!pages) |
Colin Cross | f63958d | 2013-12-13 19:26:28 -0800 | [diff] [blame] | 41 | return NULL; |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 42 | |
| 43 | if (buffer->flags & ION_FLAG_CACHED) |
| 44 | pgprot = PAGE_KERNEL; |
| 45 | else |
| 46 | pgprot = pgprot_writecombine(PAGE_KERNEL); |
| 47 | |
| 48 | for_each_sg(table->sgl, sg, table->nents, i) { |
Colin Cross | 06e0dca | 2013-12-13 14:25:02 -0800 | [diff] [blame] | 49 | int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 50 | struct page *page = sg_page(sg); |
| 51 | BUG_ON(i >= npages); |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 52 | for (j = 0; j < npages_this_entry; j++) |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 53 | *(tmp++) = page++; |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 54 | } |
| 55 | vaddr = vmap(pages, npages, VM_MAP, pgprot); |
| 56 | vfree(pages); |
| 57 | |
Colin Cross | dfc4a9b | 2013-12-13 14:24:48 -0800 | [diff] [blame] | 58 | if (vaddr == NULL) |
| 59 | return ERR_PTR(-ENOMEM); |
| 60 | |
Rebecca Schultz Zavin | 8898227 | 2013-12-13 14:24:26 -0800 | [diff] [blame] | 61 | return vaddr; |
| 62 | } |
| 63 | |
| 64 | void ion_heap_unmap_kernel(struct ion_heap *heap, |
| 65 | struct ion_buffer *buffer) |
| 66 | { |
| 67 | vunmap(buffer->vaddr); |
| 68 | } |
| 69 | |
/*
 * ion_heap_map_user - map (part of) @buffer's pages into a userspace VMA.
 *
 * Walks the buffer's scatterlist and remap_pfn_range()s each entry into
 * @vma, honouring the VMA's page offset into the buffer and stopping at
 * the end of the VMA.
 *
 * Returns 0 on success or the error from remap_pfn_range().
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	/* byte offset into the buffer at which the mapping starts */
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		/* space remaining in the VMA */
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			/* entry lies entirely before the start offset */
			offset -= sg->length;
			continue;
		} else if (offset) {
			/* mapping starts part-way into this entry */
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		/* never map past the end of the VMA */
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
| 104 | |
Colin Cross | 8b312bb | 2013-12-13 19:26:21 -0800 | [diff] [blame] | 105 | static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) |
| 106 | { |
| 107 | void *addr = vm_map_ram(pages, num, -1, pgprot); |
| 108 | if (!addr) |
| 109 | return -ENOMEM; |
| 110 | memset(addr, 0, PAGE_SIZE * num); |
| 111 | vm_unmap_ram(addr, num); |
| 112 | |
| 113 | return 0; |
| 114 | } |
| 115 | |
Colin Cross | df6cf5c | 2013-12-13 19:26:30 -0800 | [diff] [blame] | 116 | static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, |
| 117 | pgprot_t pgprot) |
| 118 | { |
| 119 | int p = 0; |
| 120 | int ret = 0; |
| 121 | struct sg_page_iter piter; |
| 122 | struct page *pages[32]; |
| 123 | |
| 124 | for_each_sg_page(sgl, &piter, nents, 0) { |
| 125 | pages[p++] = sg_page_iter_page(&piter); |
| 126 | if (p == ARRAY_SIZE(pages)) { |
| 127 | ret = ion_heap_clear_pages(pages, p, pgprot); |
| 128 | if (ret) |
| 129 | return ret; |
| 130 | p = 0; |
| 131 | } |
| 132 | } |
| 133 | if (p) |
| 134 | ret = ion_heap_clear_pages(pages, p, pgprot); |
| 135 | |
| 136 | return ret; |
| 137 | } |
| 138 | |
Rebecca Schultz Zavin | 0b6b2cd | 2013-12-13 14:24:32 -0800 | [diff] [blame] | 139 | int ion_heap_buffer_zero(struct ion_buffer *buffer) |
| 140 | { |
| 141 | struct sg_table *table = buffer->sg_table; |
| 142 | pgprot_t pgprot; |
Rebecca Schultz Zavin | 0b6b2cd | 2013-12-13 14:24:32 -0800 | [diff] [blame] | 143 | |
| 144 | if (buffer->flags & ION_FLAG_CACHED) |
| 145 | pgprot = PAGE_KERNEL; |
| 146 | else |
| 147 | pgprot = pgprot_writecombine(PAGE_KERNEL); |
| 148 | |
Colin Cross | df6cf5c | 2013-12-13 19:26:30 -0800 | [diff] [blame] | 149 | return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); |
| 150 | } |
Rebecca Schultz Zavin | 0b6b2cd | 2013-12-13 14:24:32 -0800 | [diff] [blame] | 151 | |
Colin Cross | df6cf5c | 2013-12-13 19:26:30 -0800 | [diff] [blame] | 152 | int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) |
| 153 | { |
| 154 | struct scatterlist sg; |
| 155 | |
| 156 | sg_init_table(&sg, 1); |
| 157 | sg_set_page(&sg, page, size, 0); |
| 158 | return ion_heap_sglist_zero(&sg, 1, pgprot); |
Rebecca Schultz Zavin | 0b6b2cd | 2013-12-13 14:24:32 -0800 | [diff] [blame] | 159 | } |
| 160 | |
/*
 * ion_heap_freelist_add - queue @buffer for deferred freeing.
 *
 * Adds the buffer to the heap's free list and accounts its size under
 * free_lock, then wakes the deferred-free thread.  The wakeup is issued
 * after the lock is dropped.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}
| 169 | |
/*
 * ion_heap_freelist_size - return a locked snapshot of the total number
 * of bytes currently queued on the heap's deferred free list.  The
 * value may be stale by the time the caller uses it.
 */
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}
| 180 | |
/*
 * ion_heap_freelist_drain - synchronously free queued buffers.
 *
 * Destroys buffers from the heap's deferred free list until at least
 * @size bytes have been drained or the list is empty.  @size == 0 means
 * drain everything currently queued.
 *
 * Returns the total number of bytes actually drained.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	/* cheap early-out: nothing queued */
	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		/*
		 * Drop the spinlock across the destroy; NOTE(review):
		 * presumably ion_buffer_destroy() can sleep — confirm.
		 * The loop re-checks list_empty() after re-acquiring.
		 */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}
| 209 | |
/*
 * Kernel thread body for deferred freeing: sleeps (freezably) until the
 * heap's free list is non-empty, then destroys queued buffers one at a
 * time.  The free_lock is dropped across each ion_buffer_destroy() call.
 * Runs until the thread is stopped; the return is never normally reached.
 */
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			/* raced with a drain — go back to sleep */
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}
| 235 | |
| 236 | int ion_heap_init_deferred_free(struct ion_heap *heap) |
| 237 | { |
| 238 | struct sched_param param = { .sched_priority = 0 }; |
| 239 | |
| 240 | INIT_LIST_HEAD(&heap->free_list); |
| 241 | heap->free_list_size = 0; |
John Stultz | 6a72a70 | 2013-12-17 17:04:29 -0800 | [diff] [blame] | 242 | spin_lock_init(&heap->free_lock); |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 243 | init_waitqueue_head(&heap->waitqueue); |
| 244 | heap->task = kthread_run(ion_heap_deferred_free, heap, |
| 245 | "%s", heap->name); |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 246 | if (IS_ERR(heap->task)) { |
| 247 | pr_err("%s: creating thread for deferred free failed\n", |
| 248 | __func__); |
| 249 | return PTR_RET(heap->task); |
| 250 | } |
Dan Carpenter | 54de9af | 2014-01-22 17:20:03 +0300 | [diff] [blame] | 251 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 252 | return 0; |
| 253 | } |
| 254 | |
/*
 * ion_heap_create - instantiate a heap of the type described by
 * @heap_data, dispatching to the matching type-specific constructor.
 *
 * Returns the new heap, or ERR_PTR(-EINVAL) for an unknown type or a
 * constructor failure.  NOTE(review): a constructor's actual error is
 * discarded and collapsed to -EINVAL here.
 */
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	/* constructors may return either NULL or an ERR_PTR on failure */
	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}
| 292 | |
/*
 * ion_heap_destroy - tear down a heap created by ion_heap_create(),
 * dispatching to the destructor matching its type.  A NULL @heap is a
 * no-op; an unknown type is logged and otherwise ignored.
 */
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}