/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
#include <trace/events/kmem.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	unsigned int pages_uses_vmalloc;
	int nrpages;
	unsigned long size;
};

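/*
 * When zeroing freshly allocated pages we vmap() them in batches; if a
 * vmap() attempt fails, the batch size is halved and retried up to this
 * many times before giving up.
 */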
#define MAX_VMAP_RETRIES 10

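/*
 * Allocation orders tried, from largest to smallest. With 4K pages,
 * order 8 is 1MB, order 4 is 64KB and order 0 is a single 4K page.
 */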
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

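/*
 * Allocate the largest chunk (from orders[]) that still fits in 'size'
 * and does not exceed 'max_order'. High-order attempts are opportunistic:
 * they avoid waking kswapd and fail quietly so we can fall back to a
 * smaller order. Returns NULL only if even a single page cannot be had.
 */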
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		gfp = __GFP_HIGHMEM;

		if (orders[i]) {
			gfp |= __GFP_COMP | __GFP_NORETRY |
				__GFP_NO_KSWAPD | __GFP_NOWARN;
		} else {
			gfp |= GFP_KERNEL;
		}
		trace_alloc_pages_iommu_start(gfp, orders[i]);
		page = alloc_pages(gfp, orders[i]);
		trace_alloc_pages_iommu_end(gfp, orders[i]);
		if (!page) {
			trace_alloc_pages_iommu_fail(gfp, orders[i]);
			continue;
		}

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

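/*
 * Allocate 'size' bytes as a list of high-order chunks, publish them to
 * the rest of ION through buffer->sg_table (one entry per chunk), and
 * keep a flat array of 4K struct page pointers in buffer->priv_virt for
 * kernel mappings. Cached buffers are restricted to order-0 pages.
 */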
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		unsigned long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
		unsigned int page_tbl_size;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

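		/*
		 * Carve the request into progressively smaller chunks;
		 * each chunk is at most as large as the previous one, so
		 * a failed high-order allocation is never retried.
		 */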
		INIT_LIST_HEAD(&pages_list);
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages_uses_vmalloc = 0;
		page_tbl_size = sizeof(struct page *) * data->nrpages;

		if (page_tbl_size > SZ_8K) {
			/*
			 * Do fallback to ensure we have a balance between
			 * performance and availability.
			 */
			data->pages = kmalloc(page_tbl_size,
					      __GFP_COMP | __GFP_NORETRY |
					      __GFP_NO_KSWAPD | __GFP_NOWARN);
			if (!data->pages) {
				data->pages = vmalloc(page_tbl_size);
				data->pages_uses_vmalloc = 1;
			}
		} else {
			data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
		}
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

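		/*
		 * Fill one scatterlist entry per chunk and, in parallel,
		 * expand every chunk into individual 4K pages in
		 * data->pages for later vmap()/kernel mapping use.
		 */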
		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_pages above and manually zero out all of the
		 * pages in one fell swoop here. To safeguard against
		 * insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
						DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}


err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

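/*
 * Release everything ion_iommu_heap_allocate() set up: the high-order
 * pages referenced by the sg table, the sg table itself, and the flat
 * page-pointer array (kmalloc'd or vmalloc'd, as recorded in
 * pages_uses_vmalloc).
 */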
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	table = NULL;
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
	kfree(data);
}

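/*
 * Map the whole buffer into the kernel with vmap(), using the 4K page
 * array built at allocation time. Uncached buffers are mapped
 * write-combined.
 */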
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

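/*
 * Map the buffer into a userspace VMA. The sg table may contain
 * high-order chunks, so each entry is remapped with remap_pfn_range()
 * for as much of it as the VMA (and the requested pgoff) covers.
 */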
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

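/*
 * The sg table is built at allocation time, so map_dma/unmap_dma only
 * hand it back; there is nothing to construct or tear down here.
 */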
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

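/*
 * Constructor/destructor for the ION_HEAP_TYPE_IOMMU heap instance
 * registered with the ION core; heap_data is not used by this heap type.
 */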
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}