/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
#include <trace/events/kmem.h>

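/*
 * Per-heap state: one ion_page_pool per entry in orders[], kept separately
 * for cached and uncached allocations so pages are only ever recycled with
 * a matching cache attribute.
 */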
struct ion_iommu_heap {
        struct ion_heap heap;
        struct ion_page_pool **cached_pools;
        struct ion_page_pool **uncached_pools;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
        struct page **pages;
        unsigned int pages_uses_vmalloc;
        int nrpages;
        unsigned long size;
};

#define MAX_VMAP_RETRIES 10
#define BAD_ORDER -1

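/*
 * Allocation orders tried, largest first. With 4K pages these correspond to
 * 2MB, 1MB, 64KB and 4KB chunks. The high-order GFP flags omit __GFP_WAIT
 * and kswapd wakeups, so those attempts fail fast and fall back to the next
 * smaller order; only the order-0 path (GFP_KERNEL) may sleep and reclaim.
 */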
static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY |
                                      __GFP_NO_KSWAPD | __GFP_NOWARN |
                                      __GFP_IO | __GFP_FS | __GFP_ZERO);

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

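/* Map an order to its index in orders[]; an unmanaged order hits BUG(). */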
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return BAD_ORDER;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

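/*
 * Grab the largest chunk that is no bigger than the remaining size and no
 * bigger than max_order, preferring the per-order pool unless the caller
 * forced direct allocation with ION_FLAG_POOL_FORCE_ALLOC.
 */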
static struct page_info *alloc_largest_available(struct ion_iommu_heap *heap,
                                                 unsigned long size,
                                                 unsigned int max_order,
                                                 unsigned long flags)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                gfp_t gfp;
                int idx = order_to_index(orders[i]);
                struct ion_page_pool *pool;

                if (idx == BAD_ORDER)
                        continue;

                if (ION_IS_CACHED(flags)) {
                        pool = heap->cached_pools[idx];
                        BUG_ON(!pool);
                } else {
                        pool = heap->uncached_pools[idx];
                        BUG_ON(!pool);
                }

                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                if (orders[i])
                        gfp = high_gfp_flags;
                else
                        gfp = low_gfp_flags;

                trace_alloc_pages_iommu_start(gfp, orders[i]);
                if (flags & ION_FLAG_POOL_FORCE_ALLOC)
                        page = alloc_pages(gfp, orders[i]);
                else
                        page = ion_page_pool_alloc(pool);
                trace_alloc_pages_iommu_end(gfp, orders[i]);
                if (!page) {
                        trace_alloc_pages_iommu_fail(gfp, orders[i]);
                        continue;
                }

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                if (!info) {
                        /* Don't leak the page if we can't track it. */
                        if (flags & ION_FLAG_POOL_FORCE_ALLOC)
                                __free_pages(page, orders[i]);
                        else
                                ion_page_pool_free(pool, page);
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

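/*
 * Zero a buffer by vmapping batches of its 4K pages write-combined and
 * memset()ing them; for cached buffers the corresponding cache lines are
 * then invalidated so stale data cannot mask the zeroing.
 */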
static int ion_iommu_buffer_zero(struct ion_iommu_priv_data *data,
                                 bool is_cached)
{
        int i, j, k;
        unsigned int npages_to_vmap;
        unsigned int total_pages;
        void *ptr = NULL;
        /*
         * It's cheaper just to use writecombine memory and skip the
         * cache vs. using cached memory and trying to flush it afterwards.
         */
        pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);

        /*
         * As an optimization, we manually zero out all of the
         * pages in one fell swoop here. To safeguard against
         * insufficient vmalloc space, we only vmap
         * `npages_to_vmap' at a time, starting with a
         * conservative estimate of 1/8 of the total number of
         * vmalloc pages available. Note that the `pages'
         * array is composed of all 4K pages, irrespective of
         * the size of the pages on the sg list.
         */
        npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
                        >> PAGE_SHIFT;
        total_pages = data->nrpages;
        for (i = 0; i < total_pages; i += npages_to_vmap) {
                npages_to_vmap = min(npages_to_vmap, total_pages - i);
                for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap; ++j) {
                        ptr = vmap(&data->pages[i], npages_to_vmap,
                                   VM_IOREMAP, pgprot);
                        if (ptr)
                                break;
                        else
                                npages_to_vmap >>= 1;
                }
                if (!ptr)
                        return -ENOMEM;

                memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
                if (is_cached) {
                        /*
                         * Invalidate the cache so the CPU picks up the
                         * zeroing done through the writecombine mapping.
                         */
                        for (k = 0; k < npages_to_vmap; k++) {
                                void *p = kmap_atomic(data->pages[i + k]);
                                phys_addr_t phys = page_to_phys(
                                                        data->pages[i + k]);

                                dmac_inv_range(p, p + PAGE_SIZE);
                                outer_inv_range(phys, phys + PAGE_SIZE);
                                kunmap_atomic(p);
                        }
                }
                vunmap(ptr);
        }

        return 0;
}

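/*
 * Build the buffer from the largest chunks available, record them in an
 * sg_table, and also keep a flat array of 4K struct page pointers
 * (vmalloc'd when the table itself gets large) for kernel/userspace mapping.
 */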
static int ion_iommu_heap_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long size, unsigned long align,
                                   unsigned long flags)
{
        int ret, i;
        struct list_head pages_list;
        struct page_info *info, *tmp_info;
        struct ion_iommu_priv_data *data = NULL;
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        if (msm_use_iommu()) {
                struct scatterlist *sg;
                struct sg_table *table;
                int j;
                unsigned int num_large_pages = 0;
                unsigned long size_remaining = PAGE_ALIGN(size);
                unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
                unsigned int page_tbl_size;

                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                INIT_LIST_HEAD(&pages_list);
                while (size_remaining > 0) {
                        info = alloc_largest_available(iommu_heap,
                                                       size_remaining,
                                                       max_order,
                                                       flags);
                        if (!info) {
                                ret = -ENOMEM;
                                goto err_free_data;
                        }
                        list_add_tail(&info->list, &pages_list);
                        size_remaining -= order_to_size(info->order);
                        max_order = info->order;
                        num_large_pages++;
                }

                data->size = PFN_ALIGN(size);
                data->nrpages = data->size >> PAGE_SHIFT;
                data->pages_uses_vmalloc = 0;
                page_tbl_size = sizeof(struct page *) * data->nrpages;

                if (page_tbl_size > SZ_8K) {
                        /*
                         * Do fallback to ensure we have a balance between
                         * performance and availability.
                         */
                        data->pages = kmalloc(page_tbl_size,
                                              __GFP_COMP | __GFP_NORETRY |
                                              __GFP_NO_KSWAPD | __GFP_NOWARN);
                        if (!data->pages) {
                                data->pages = vmalloc(page_tbl_size);
                                data->pages_uses_vmalloc = 1;
                        }
                } else {
                        data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
                }
                if (!data->pages) {
                        ret = -ENOMEM;
                        goto err_free_data;
                }

                table = buffer->sg_table =
                                kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!table) {
                        ret = -ENOMEM;
                        goto err1;
                }
                ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
                if (ret)
                        goto err2;

                i = 0;
                sg = table->sgl;
                list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                        struct page *page = info->page;

                        sg_set_page(sg, page, order_to_size(info->order), 0);
                        sg_dma_address(sg) = sg_phys(sg);
                        sg = sg_next(sg);
                        for (j = 0; j < (1 << info->order); ++j)
                                data->pages[i++] = nth_page(page, j);
                        list_del(&info->list);
                        kfree(info);
                }

                if (flags & ION_FLAG_POOL_FORCE_ALLOC) {
                        ret = ion_iommu_buffer_zero(data, ION_IS_CACHED(flags));
                        if (ret) {
                                pr_err("Couldn't vmap the pages for zeroing\n");
                                goto err3;
                        }

                        if (!ION_IS_CACHED(flags))
                                dma_sync_sg_for_device(NULL, table->sgl,
                                                       table->nents,
                                                       DMA_BIDIRECTIONAL);
                }
                buffer->priv_virt = data;
                return 0;

        } else {
                return -ENOMEM;
        }

err3:
        sg_free_table(buffer->sg_table);
err2:
        kfree(buffer->sg_table);
        buffer->sg_table = 0;
err1:
        if (data->pages_uses_vmalloc)
                vfree(data->pages);
        else
                kfree(data->pages);
err_free_data:
        kfree(data);

        list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                if (info->page)
                        __free_pages(info->page, info->order);
                list_del(&info->list);
                kfree(info);
        }
        return ret;
}

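/*
 * Return each chunk to the pool matching its order and cache attribute (or
 * straight to the buddy allocator for force-allocated buffers), zeroing
 * pooled pages first so recycled memory never leaks old contents.
 */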
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->sg_table;
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        bool cached = ion_buffer_cached(buffer);
        struct ion_iommu_heap *iommu_heap =
                container_of(buffer->heap, struct ion_iommu_heap, heap);

        if (!table)
                return;
        if (!data)
                return;

        if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC))
                ion_iommu_buffer_zero(data, ION_IS_CACHED(buffer->flags));

        for_each_sg(table->sgl, sg, table->nents, i) {
                int order = get_order(sg_dma_len(sg));
                int idx = order_to_index(order);
                struct ion_page_pool *pool;

                if (idx == BAD_ORDER) {
                        WARN_ON(1);
                        continue;
                }

                if (cached)
                        pool = iommu_heap->cached_pools[idx];
                else
                        pool = iommu_heap->uncached_pools[idx];

                if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
                        __free_pages(sg_page(sg), order);
                else
                        ion_page_pool_free(pool, sg_page(sg));
        }

        sg_free_table(table);
        kfree(table);
        table = 0;
        if (data->pages_uses_vmalloc)
                vfree(data->pages);
        else
                kfree(data->pages);
        kfree(data);
}

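/* Map the whole buffer into the kernel, write-combined unless it is cached. */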
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        pgprot_t page_prot = PAGE_KERNEL;

        if (!data)
                return NULL;

        if (!ION_IS_CACHED(buffer->flags))
                page_prot = pgprot_writecombine(page_prot);

        buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

        return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!buffer->vaddr)
                return;

        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
}

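/*
 * Map the buffer into userspace one sg entry at a time, honouring the
 * vma offset and stopping once the vma is fully populated.
 */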
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                            struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
                                               struct ion_buffer *buffer)
{
        return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
}

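/* debugfs helper: dump per-order pool occupancy for both pool sets. */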
static int ion_iommu_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                     void *unused)
{
        struct ion_iommu_heap *iommu_heap = container_of(heap,
                                                        struct ion_iommu_heap,
                                                        heap);
        int i;
        unsigned long total = 0;

        seq_printf(s, "Cached Pools:\n");
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = iommu_heap->cached_pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);

                total += (1 << pool->order) * PAGE_SIZE *
                         (pool->low_count + pool->high_count);
        }

        seq_printf(s, "Uncached Pools:\n");
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = iommu_heap->uncached_pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lx total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lx total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);

                total += (1 << pool->order) * PAGE_SIZE *
                         (pool->low_count + pool->high_count);
        }
        seq_printf(s, "Total bytes in pool: %lx\n", total);
        return 0;
}

static struct ion_heap_ops iommu_heap_ops = {
        .allocate = ion_iommu_heap_allocate,
        .free = ion_iommu_heap_free,
        .map_user = ion_iommu_heap_map_user,
        .map_kernel = ion_iommu_heap_map_kernel,
        .unmap_kernel = ion_iommu_heap_unmap_kernel,
        .map_dma = ion_iommu_heap_map_dma,
        .unmap_dma = ion_iommu_heap_unmap_dma,
};

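/*
 * Create the heap and its cached/uncached pool arrays; every pool must be
 * created successfully or the partially built heap is torn down again.
 */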
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_iommu_heap *iommu_heap;
        int i;

        iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
        if (!iommu_heap)
                return ERR_PTR(-ENOMEM);

        iommu_heap->heap.ops = &iommu_heap_ops;
        iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
        iommu_heap->uncached_pools = kzalloc(
                                sizeof(struct ion_page_pool *) * num_orders,
                                GFP_KERNEL);
        if (!iommu_heap->uncached_pools)
                goto err_alloc_uncached_pools;

        iommu_heap->cached_pools = kzalloc(
                                sizeof(struct ion_page_pool *) * num_orders,
                                GFP_KERNEL);
        if (!iommu_heap->cached_pools)
                goto err_alloc_cached_pools;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags;

                if (orders[i])
                        gfp_flags = high_gfp_flags | __GFP_ZERO;
                else
                        gfp_flags = low_gfp_flags | __GFP_ZERO;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_cached_pool;
                iommu_heap->cached_pools[i] = pool;
        }

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags;

                if (orders[i])
                        gfp_flags = high_gfp_flags | __GFP_ZERO;
                else
                        gfp_flags = low_gfp_flags | __GFP_ZERO;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_uncached_pool;
                iommu_heap->uncached_pools[i] = pool;
        }
        iommu_heap->heap.debug_show = ion_iommu_heap_debug_show;
        return &iommu_heap->heap;

err_create_uncached_pool:
        /* Destroy the uncached pools created so far, then fall through. */
        for (i = 0; i < num_orders; i++)
                if (iommu_heap->uncached_pools[i])
                        ion_page_pool_destroy(iommu_heap->uncached_pools[i]);

err_create_cached_pool:
        for (i = 0; i < num_orders; i++)
                if (iommu_heap->cached_pools[i])
                        ion_page_pool_destroy(iommu_heap->cached_pools[i]);

        kfree(iommu_heap->cached_pools);
err_alloc_cached_pools:
        kfree(iommu_heap->uncached_pools);
err_alloc_uncached_pools:
        kfree(iommu_heap);
        return ERR_PTR(-ENOMEM);
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);
        int i;

        /* Release the per-order pools and their bookkeeping before the heap. */
        for (i = 0; i < num_orders; i++) {
                ion_page_pool_destroy(iommu_heap->cached_pools[i]);
                ion_page_pool_destroy(iommu_heap->uncached_pools[i]);
        }
        kfree(iommu_heap->cached_pools);
        kfree(iommu_heap->uncached_pools);
        kfree(iommu_heap);
}