/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

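/*
 * High-order allocations are opportunistic: fail fast and silently (no
 * retries, no kswapd wakeup, no blocking) so the allocator can fall
 * back to a smaller order.  Low-order allocations may block, since
 * order-0 pages must succeed for the buffer to exist at all.
 * __GFP_ZERO keeps freshly allocated pages from leaking stale data.
 */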
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					   __GFP_NOWARN);
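
/*
 * Allocation orders tried, largest first: with 4KB pages this is 1MB,
 * 64KB, then single pages.  Building buffers from the largest chunks
 * available keeps scatterlists short.
 */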
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

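/*
 * Uncached pages are recycled through the per-order pools (they have
 * already been flushed from the CPU caches); cached pages are taken
 * straight from the page allocator each time.
 */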
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
	}
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot
		 * so the writes occur to noncached mappings, as the
		 * pool's purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			if (addr) {
				memset(addr, 0, PAGE_SIZE);
				vunmap(addr);
			}
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

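/*
 * Allocate the largest chunk, no bigger than max_order, that still fits
 * in the remaining size.  Callers pass the order of the previous chunk
 * as max_order on subsequent calls, so orders only ever decrease and an
 * order that has already failed is never retried for the same buffer.
 */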
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

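/*
 * Assemble a buffer from variable-order chunks, largest first, then
 * describe the result in an sg_table.  The CPU cache is cleaned at the
 * end so the device sees the zeroed contents.
 */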
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	/*
	 * When userspace mappings are faulted in page by page, the
	 * scatterlist needs one entry per page; otherwise one entry
	 * per allocated chunk is enough.
	 */
	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

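/*
 * Kernel mappings are built with vmap() over the individual pages of
 * each scatterlist entry; cached buffers get PAGE_KERNEL, uncached ones
 * a writecombine pgprot to match their other mappings.
 */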
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

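/*
 * vma->vm_pgoff is consumed one scatterlist entry at a time: an entry
 * is skipped for each page of offset until the offset is exhausted,
 * then entries are mapped until the vma is full.
 */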
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long)atomic_read(&system_heap_allocated));

	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.print_debug = ion_system_print_debug,
};

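/*
 * One page pool is created per entry in orders[]; the high-order pools
 * use the non-blocking gfp mask since those allocations are
 * opportunistic and can fall back to smaller orders.
 */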
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
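/*
 * The "contig" variant below simply kzalloc()s the whole buffer, so it
 * is physically contiguous but limited to sizes the slab allocator can
 * satisfy.
 */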
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

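/*
 * A kzalloc()ed buffer is one physically contiguous run, so its
 * sg_table is a single entry covering the whole allocation.
 */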
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long)atomic_read(&system_contig_heap_allocated));

	return 0;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.print_debug = ion_system_contig_print_debug,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}