/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
static unsigned int system_heap_has_outer_cache;
static unsigned int system_heap_contig_has_outer_cache;

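/*
 * Bookkeeping for one chunk of pages returned by
 * alloc_largest_available().  The struct lives in the (kmapped) first
 * page of the chunk itself and is threaded onto a local list until the
 * allocation's sg_table has been built.
 */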
struct page_info {
	struct page *page;
	unsigned long order;
	struct list_head list;
};

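/*
 * Grab the largest chunk (order 8, 4, then 0) that still fits in the
 * remaining size.  The page_info describing the chunk is stored in the
 * chunk via kmap() and must be kunmap()ed by the caller once the chunk
 * has been consumed.
 */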
static struct page_info *alloc_largest_available(unsigned long size,
						 bool split_pages)
{
	static unsigned int orders[] = {8, 4, 0};
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (1 << orders[i]) * PAGE_SIZE)
			continue;
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
		if (!page)
			continue;
		if (split_pages)
			split_page(page, orders[i]);
		info = kmap(page);
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

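/*
 * Build a buffer out of discontiguous chunks: take the largest orders
 * still available until the request is satisfied, then describe the
 * result in an sg_table.  When the buffer will fault in user mappings
 * the higher-order pages are split so that every sg entry covers
 * exactly one page.
 */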
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(size_remaining, split_pages);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kunmap(page);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	/*
	 * Each page_info lives inside its own chunk, so pull out what we
	 * need and kunmap() the chunk before its pages are freed.
	 */
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		unsigned long order = info->order;

		list_del(&info->list);
		kunmap(page);
		if (split_pages)
			for (i = 0; i < (1 << order); i++)
				__free_page(page + i);
		else
			__free_pages(page, order);
	}
	return -ENOMEM;
}

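/* Release every chunk referenced by the buffer's sg_table. */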
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

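/*
 * Map the whole buffer into the kernel with vmap().  A temporary array
 * of page pointers is built from the sg_table; cached buffers are
 * mapped with PAGE_KERNEL, everything else write-combined.
 */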
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = kzalloc(sizeof(struct page *) * npages,
				      GFP_KERNEL);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	kfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

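/*
 * Tear down an IOMMU mapping created by the map_iommu ops below: unmap
 * the range and return the IOVA region to the MSM IOVA allocator.
 */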
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

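/*
 * Map the buffer into userspace one sg entry at a time, treating
 * vma->vm_pgoff as an offset in sg entries.  Only cached mappings are
 * supported by this heap.
 */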
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
	}
	return 0;
}

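/*
 * Cache maintenance for the requested ION cache ioctl.  Without a
 * kernel vaddr the sg_table is synced through the DMA API; with one the
 * dmac_* range helpers are used.  Outer cache maintenance is done per
 * page when the platform has an outer cache.
 */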
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		struct sg_table *table = buffer->priv_virt;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate virtual address to physical address\n");
				return -EINVAL;
			}
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

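/*
 * Map the buffer's sg_table into an MSM IOMMU domain/partition.  Any
 * extra IOVA space requested beyond the buffer size is mapped with
 * msm_iommu_map_extra() using the first entry's physical address.
 */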
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
			      struct ion_iommu_map *data,
			      unsigned int domain_num,
			      unsigned int partition_num,
			      unsigned long align,
			      unsigned long iova_length,
			      unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct sg_table *table = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	/* Use the biggest alignment to allow bigger IOMMU mappings.
	 * Use the first entry since the first entry will always be the
	 * biggest entry. To take advantage of bigger mapping sizes both the
	 * VA and PA addresses have to be aligned to the biggest size.
	 */
	if (table->sgl->length > align)
		align = table->sgl->length;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.cache_op = ion_system_heap_cache_ops,
	.print_debug = ion_system_print_debug,
	.map_iommu = ion_system_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	system_heap_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

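/*
 * The contiguous variant simply kzalloc()s the whole buffer, so the
 * memory is physically contiguous and already zeroed.
 */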
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

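/*
 * A kzalloc()ed buffer is one physically contiguous region, so its DMA
 * description is a single-entry sg_table.
 */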
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags))
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

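/*
 * Cache maintenance for the contiguous heap: the caller-supplied vaddr
 * is passed straight to the dmac_* range helpers, with a page of
 * outer-cache maintenance at the corresponding physical address when an
 * outer cache is present.
 */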
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
				buffer->priv_virt);
			return -EINVAL;
		}

		outer_cache_op(pstart, pstart + PAGE_SIZE);
	}

	return 0;
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

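/*
 * IOMMU mapping for the contiguous heap: build a one-entry scatterlist
 * for the kzalloc()ed region and map it into the requested MSM
 * domain/partition.  Without an IOMMU the physical address is used as
 * the IOVA directly.
 */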
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return;
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.cache_op = ion_system_contig_heap_cache_ops,
	.print_debug = ion_system_contig_print_debug,
	.map_iommu = ion_system_contig_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}