/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

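/*
 * Maximum number of times the zeroing path in ion_iommu_heap_allocate()
 * retries vmap() on a chunk of the page array, halving the chunk size
 * after each failure.
 */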
#define MAX_VMAP_RETRIES 10

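/*
 * Allocation orders tried for each chunk, largest first: order 8
 * (1MB with 4K pages), order 4 (64KB), then order 0 (a single 4K page).
 */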
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

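/* One entry per higher-order chunk, queued up until the sg_table is built. */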
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

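/*
 * Allocate the largest chunk that is no bigger than @size and no higher
 * than @max_order. Returns NULL if no order can be satisfied.
 */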
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

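		/*
		 * Higher-order attempts should fail fast and quietly: on
		 * failure we simply fall back to a smaller order, so skip
		 * retries, kswapd wakeups and allocation-failure warnings.
		 */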
		gfp = __GFP_HIGHMEM;

		if (orders[i]) {
			gfp |= __GFP_COMP | __GFP_NORETRY |
				__GFP_NO_KSWAPD | __GFP_NOWARN;
		} else {
			gfp |= GFP_KERNEL;
		}
		page = alloc_pages(gfp, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

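/*
 * Build the buffer out of high-order chunks: keep calling
 * alloc_largest_available() until the requested size is covered, then
 * record every chunk in buffer->sg_table and every constituent 4K page
 * in the flat data->pages array. The pages are zeroed through a chunked
 * vmap() pass and, for uncached buffers, synced for device access before
 * the buffer is handed back.
 */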
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		INIT_LIST_HEAD(&pages_list);
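		/*
		 * Grab chunks until the request is covered. max_order only
		 * ever shrinks, so later chunks are never larger than the
		 * one just allocated; cached buffers start (and stay) at
		 * order 0.
		 */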
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						       max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
			kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

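		/*
		 * Move every chunk from pages_list onto the sg_table and,
		 * in parallel, fill data->pages with pointers to each of
		 * its 4K constituent pages.
		 */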
		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_page above and manually zero out all of the
		 * pages in one fell swoop here. To safeguard against
		 * insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					   VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

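		/*
		 * For uncached buffers, write the freshly zeroed contents
		 * back out of the CPU caches so the device sees zeroes too.
		 */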
		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

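/*
 * Undo ion_iommu_heap_allocate(): free every chunk referenced by the
 * sg_table, then the table itself and the private page array.
 */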
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	table = NULL;
	kfree(data->pages);
	kfree(data);
}

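/*
 * Map the whole buffer into the kernel with vmap() using the flat 4K
 * page array; uncached buffers are mapped write-combined.
 */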
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

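/*
 * Map the buffer into userspace by walking the sg_table and calling
 * remap_pfn_range() for each chunk, honouring vma->vm_pgoff and
 * stopping once the VMA is fully populated.
 */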
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

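/*
 * The sg_table is built at allocation time, so map_dma simply hands it
 * back and unmap_dma has nothing to do.
 */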
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

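/*
 * Instantiate the IOMMU heap; the ION core invokes this for platform
 * heaps of type ION_HEAP_TYPE_IOMMU.
 */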
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}