/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
#include <trace/events/kmem.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	unsigned int pages_uses_vmalloc;
	int nrpages;
	unsigned long size;
};

#define MAX_VMAP_RETRIES 10

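/*
 * Page orders tried by alloc_largest_available(), largest first.
 * With 4K pages these correspond to 1MB, 64KB and 4KB chunks; order 0
 * is the unconditional fallback.
 */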
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

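/*
 * Return the largest chunk, up to max_order, that fits in the
 * remaining size. High orders are requested opportunistically
 * (__GFP_NORETRY, __GFP_NOWARN, no kswapd wakeup) so that a failed
 * large allocation simply falls through to the next smaller order
 * rather than pressuring reclaim.
 */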
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		gfp = __GFP_HIGHMEM;

		if (orders[i]) {
			gfp |= __GFP_COMP | __GFP_NORETRY |
			       __GFP_NO_KSWAPD | __GFP_NOWARN;
		} else {
			gfp |= GFP_KERNEL;
		}
		trace_alloc_pages_iommu_start(gfp, orders[i]);
		page = alloc_pages(gfp, orders[i]);
		trace_alloc_pages_iommu_end(gfp, orders[i]);
		if (!page) {
			trace_alloc_pages_iommu_fail(gfp, orders[i]);
			continue;
		}

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			/* Don't leak the page if the bookkeeping alloc fails */
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

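/*
 * Build the buffer from a list of the largest chunks available,
 * capping the chunk size at a single 4K page for cached buffers, and
 * record every constituent 4K page in data->pages so the buffer can
 * later be vmap()ed. Pages are zeroed here in bounded vmap() batches,
 * and uncached buffers are flushed to memory before use.
 */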
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		unsigned long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
		unsigned int page_tbl_size;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		INIT_LIST_HEAD(&pages_list);
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						       max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

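		/*
		 * Flat array of 4K struct page pointers, one entry per
		 * PAGE_SIZE of the buffer. It backs map_kernel() and
		 * the chunked zeroing below.
		 */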
		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages_uses_vmalloc = 0;
		page_tbl_size = sizeof(struct page *) * data->nrpages;

		if (page_tbl_size > SZ_8K) {
			/*
			 * Large table: try a physically contiguous
			 * kmalloc first for performance, but without
			 * retries or warnings, and fall back to
			 * vmalloc so the allocation can still succeed
			 * when memory is fragmented.
			 */
			data->pages = kmalloc(page_tbl_size,
					      __GFP_COMP | __GFP_NORETRY |
					      __GFP_NO_KSWAPD | __GFP_NOWARN);
			if (!data->pages) {
				data->pages = vmalloc(page_tbl_size);
				data->pages_uses_vmalloc = 1;
			}
		} else {
			data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
		}
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
			kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

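		/*
		 * One sg entry per large chunk; expand each chunk into
		 * its constituent 4K pages in data->pages as we go.
		 */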
		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_pages above and manually zero out all of the
		 * pages in one fell swoop here. To safeguard against
		 * insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					   VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

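		/*
		 * Uncached buffers are mapped write-combined or through
		 * the IOMMU, so push the zeroed contents out of the CPU
		 * caches before handing the buffer out.
		 */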
		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

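/*
 * Give back the high-order pages via the sg list, then free the page
 * pointer array (kmalloc'd or vmalloc'd) and the private data.
 */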
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	buffer->sg_table = NULL;
	if (data->pages_uses_vmalloc)
		vfree(data->pages);
	else
		kfree(data->pages);
	kfree(data);
}

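/*
 * Map the whole buffer into the kernel with vmap(), using a
 * write-combined protection for uncached buffers.
 */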
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

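/*
 * mmap() the buffer by walking the sg list and remapping each chunk,
 * honouring the mapping offset in vma->vm_pgoff and clamping the last
 * chunk to the end of the VMA.
 */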
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

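/*
 * The sg_table is built at allocation time, so map_dma simply returns
 * it and unmap_dma has nothing to undo.
 */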
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

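/*
 * Heap lifetime: create allocates the ion_iommu_heap wrapper and wires
 * up iommu_heap_ops; destroy frees it. Per-buffer state lives in
 * buffer->priv_virt and buffer->sg_table, managed by the ops above.
 */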
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}