/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>
#include <trace/events/kmem.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

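/*
 * When zeroing freshly allocated buffers we vmap the pages in batches;
 * if a vmap fails, the batch size is halved and retried up to this many
 * times before giving up.
 */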
#define MAX_VMAP_RETRIES 10

static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

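/*
 * Bookkeeping for one block returned by alloc_largest_available(): the
 * head page, the order it was allocated with, and the node that links
 * it into the temporary list built up during allocation.
 */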
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

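/*
 * Allocate the largest block from the orders[] table that still fits in
 * `size' and does not exceed `max_order'. Returns NULL if nothing could
 * be allocated.
 */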
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		gfp = __GFP_HIGHMEM;

		if (orders[i]) {
			gfp |= __GFP_COMP | __GFP_NORETRY |
			       __GFP_NO_KSWAPD | __GFP_NOWARN;
		} else {
			gfp |= GFP_KERNEL;
		}
		trace_alloc_pages_iommu_start(gfp, orders[i]);
		page = alloc_pages(gfp, orders[i]);
		trace_alloc_pages_iommu_end(gfp, orders[i]);
		if (!page) {
			trace_alloc_pages_iommu_fail(gfp, orders[i]);
			continue;
		}

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

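/*
 * Allocation path: carve the request into a list of high-order blocks,
 * describe each block with one scatterlist entry, flatten the blocks
 * into an array of 4K pages used for zeroing and for map_kernel(), then
 * zero the memory in vmap'd batches.
 */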
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		unsigned long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

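		/*
		 * Carve the request into the largest blocks available.
		 * Capping max_order at the order of the previous block
		 * keeps the blocks in non-increasing order.
		 */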
		INIT_LIST_HEAD(&pages_list);
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						       max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

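		/*
		 * One scatterlist entry per high-order block; data->pages
		 * additionally records every constituent 4K page so the
		 * buffer can later be zeroed and vmap'd.
		 */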
		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_pages above and manually zero out all of the
		 * pages in one fell swoop here. To safeguard against
		 * insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
					>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					   VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

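		/*
		 * The zeroing above went through a cached mapping; for
		 * uncached buffers, write those zeroes back out of the CPU
		 * cache before the buffer is handed out.
		 */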
		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

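/*
 * Every scatterlist entry covers exactly one block allocated in
 * alloc_largest_available(), so get_order() of its length recovers the
 * order to free it with.
 */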
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	table = NULL;
	kfree(data->pages);
	kfree(data);
}

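/*
 * Map the whole buffer into a virtually contiguous kernel range using
 * the flattened 4K page array; uncached buffers get a write-combined
 * mapping.
 */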
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

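/*
 * Map the buffer into userspace by walking the scatterlist, skipping
 * past vma->vm_pgoff and remapping each block (or partial block) until
 * the VMA is filled.
 */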
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);
		int ret;

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

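/*
 * The sg_table is built at allocation time, so map_dma simply hands it
 * back to the ION core and unmap_dma has nothing to undo.
 */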
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

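/*
 * Constructor/destructor invoked by the ION core for heaps of type
 * ION_HEAP_TYPE_IOMMU.
 */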
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}