/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

/*
 * We attempt to allocate high-order pages and store them in an sg
 * list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

#define MAX_VMAP_RETRIES 10

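/*
 * Chunk sizes we will try when allocating, largest first, expressed
 * as page orders: with 4K pages these are 1MB, 64KB and 4KB chunks.
 */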
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

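/*
 * Bookkeeping for a single high-order allocation while a buffer is
 * being assembled; entries live on a temporary list until they are
 * transferred into the buffer's sg_table.
 */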
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

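/*
 * Allocate the largest chunk that is no bigger than the remaining
 * size and no higher than max_order. Returns NULL when no order in
 * orders[] can satisfy the request.
 */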
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | __GFP_COMP,
				   orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

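/*
 * Allocate the buffer as a list of high-order chunks, build an
 * sg_table with one entry per chunk, and also record every 4K
 * subpage in data->pages so the buffer can later be vmap()ed and
 * mmap()ed page by page. The pages are zeroed in batches below
 * instead of being allocated with __GFP_ZERO.
 */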
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	int ret, i;
	struct scatterlist *sg;	/* used for both setup and error cleanup */
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = orders[0];

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		INIT_LIST_HEAD(&pages_list);
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						       max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
				GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_pages() above and manually zero out all of
		 * the pages in one fell swoop here. To safeguard
		 * against insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
						DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	/*
	 * The pages have already been moved off pages_list and onto the
	 * sg table at this point, so free them through the sg entries.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

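/*
 * Free each chunk recorded in the sg_table (the entry length encodes
 * the chunk's order), then release the table and the bookkeeping
 * structures created by ion_iommu_heap_allocate().
 */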
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	kfree(data->pages);
	kfree(data);
}

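/*
 * Map the whole buffer into the kernel with vmap(); uncached buffers
 * get a writecombined mapping. ion_iommu_heap_unmap_kernel() below
 * simply vunmap()s the result.
 */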
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

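/*
 * Map the buffer into a user VMA chunk by chunk with
 * remap_pfn_range(), treating vma->vm_pgoff as a page offset into
 * the buffer and stopping once the VMA has been filled.
 */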
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

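/*
 * Map the buffer into an IOMMU domain: reserve iova_length bytes of
 * address space in the requested domain/partition, map the buffer's
 * sg list into it, and back any extra length beyond the buffer size
 * with 4K mappings via msm_iommu_map_extra().
 */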
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);
	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	/* Release the full reservation made above, including any extra. */
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

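/*
 * Undo ion_iommu_heap_map_iommu(): unmap the range and release the
 * iova reservation recorded in the ion_iommu_map cookie.
 */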
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}

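/*
 * Clean, invalidate or flush the buffer's kernel mapping as
 * requested, and mirror the operation on the outer cache (page by
 * page over the buffer's physical pages) when the heap reports one.
 */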
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;

		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

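/*
 * Instantiate an IOMMU heap from platform heap data. A board file
 * would typically describe the heap in its ion platform data, for
 * example (illustrative only; the id/name values shown here are
 * assumptions, not taken from this file):
 *
 *	{
 *		.type = ION_HEAP_TYPE_IOMMU,
 *		.id   = ION_IOMMU_HEAP_ID,
 *		.name = ION_IOMMU_HEAP_NAME,
 *	},
 *
 * ion_iommu_heap_destroy() below frees the heap again.
 */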
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
}