/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
        struct ion_heap heap;
        unsigned int has_outer_cache;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
        struct page **pages;
        int nrpages;
        unsigned long size;
};

#define MAX_VMAP_RETRIES 10

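/*
 * Chunk sizes to try, largest first: with 4K pages these orders
 * correspond to 1MB, 64KB and single 4K pages. Order 0 must stay
 * last so an allocation can always fall back to plain pages.
 */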
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

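/*
 * Allocate the largest chunk that is no larger than max_order and
 * still fits in the remaining size, falling back through orders[]
 * until an allocation succeeds. Returns NULL when nothing could be
 * allocated.
 */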
static struct page_info *alloc_largest_available(unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                gfp_t gfp;
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COMP;
                if (orders[i])
                        gfp |= __GFP_NOWARN;

                page = alloc_pages(gfp, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                if (!info) {
                        /* Don't leak the chunk if the bookkeeping fails. */
                        __free_pages(page, orders[i]);
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

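/*
 * Buffer allocation: build up a list of chunks with
 * alloc_largest_available() (cached buffers are limited to 4K pages,
 * and each chunk is no larger than the previous one), record the
 * chunks in the buffer's sg_table, expand them into the flat 4K page
 * array used for vmap(), zero everything, and clean the cache for
 * uncached buffers.
 */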
static int ion_iommu_heap_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long size, unsigned long align,
                                   unsigned long flags)
{
        int ret, i;
        struct list_head pages_list;
        struct page_info *info, *tmp_info;
        struct ion_iommu_priv_data *data = NULL;

        if (msm_use_iommu()) {
                struct scatterlist *sg;
                struct sg_table *table;
                int j;
                void *ptr = NULL;
                unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
                long size_remaining = PAGE_ALIGN(size);
                unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];

                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                INIT_LIST_HEAD(&pages_list);
                while (size_remaining > 0) {
                        info = alloc_largest_available(size_remaining,
                                                       max_order);
                        if (!info) {
                                ret = -ENOMEM;
                                goto err_free_data;
                        }
                        list_add_tail(&info->list, &pages_list);
                        size_remaining -= order_to_size(info->order);
                        max_order = info->order;
                        num_large_pages++;
                }

                data->size = PFN_ALIGN(size);
                data->nrpages = data->size >> PAGE_SHIFT;
                data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
                                      GFP_KERNEL);
                if (!data->pages) {
                        ret = -ENOMEM;
                        goto err_free_data;
                }

                table = buffer->sg_table =
                                kzalloc(sizeof(struct sg_table), GFP_KERNEL);

                if (!table) {
                        ret = -ENOMEM;
                        goto err1;
                }
                ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
                if (ret)
                        goto err2;

                i = 0;
                sg = table->sgl;
                list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                        struct page *page = info->page;
                        sg_set_page(sg, page, order_to_size(info->order), 0);
                        sg_dma_address(sg) = sg_phys(sg);
                        sg = sg_next(sg);
                        for (j = 0; j < (1 << info->order); ++j)
                                data->pages[i++] = nth_page(page, j);
                        list_del(&info->list);
                        kfree(info);
                }

                /*
                 * As an optimization, we omit __GFP_ZERO from
                 * alloc_page above and manually zero out all of the
                 * pages in one fell swoop here. To safeguard against
                 * insufficient vmalloc space, we only vmap
                 * `npages_to_vmap' at a time, starting with a
                 * conservative estimate of 1/8 of the total number of
                 * vmalloc pages available. Note that the `pages'
                 * array is composed of all 4K pages, irrespective of
                 * the size of the pages on the sg list.
                 */
                npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
                        >> PAGE_SHIFT;
                total_pages = data->nrpages;
                for (i = 0; i < total_pages; i += npages_to_vmap) {
                        npages_to_vmap = min(npages_to_vmap, total_pages - i);
                        for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
                             ++j) {
                                ptr = vmap(&data->pages[i], npages_to_vmap,
                                           VM_IOREMAP, pgprot_kernel);
                                if (ptr)
                                        break;
                                else
                                        npages_to_vmap >>= 1;
                        }
                        if (!ptr) {
                                pr_err("Couldn't vmap the pages for zeroing\n");
                                ret = -ENOMEM;
                                goto err3;
                        }
                        memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
                        vunmap(ptr);
                }

                if (!ION_IS_CACHED(flags))
                        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                                               DMA_BIDIRECTIONAL);

                buffer->priv_virt = data;
                return 0;

        } else {
                return -ENOMEM;
        }

err3:
        sg_free_table(buffer->sg_table);
err2:
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
err1:
        kfree(data->pages);
err_free_data:
        kfree(data);

        list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
                if (info->page)
                        __free_pages(info->page, info->order);
                list_del(&info->list);
                kfree(info);
        }
        return ret;
}

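/*
 * Free each chunk at the order recorded in its sg entry length, then
 * release the sg table and the flat page array.
 */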
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->sg_table;
        struct ion_iommu_priv_data *data = buffer->priv_virt;

        if (!table)
                return;
        if (!data)
                return;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

        sg_free_table(table);
        kfree(table);
        table = NULL;
        kfree(data->pages);
        kfree(data);
}

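/*
 * Kernel mappings are built with vmap() over the flat 4K page array;
 * uncached buffers are mapped write-combined.
 */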
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        pgprot_t page_prot = PAGE_KERNEL;

        if (!data)
                return NULL;

        if (!ION_IS_CACHED(buffer->flags))
                page_prot = pgprot_writecombine(page_prot);

        buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

        return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!buffer->vaddr)
                return;

        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
}

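/*
 * Map the buffer into userspace chunk by chunk with remap_pfn_range(),
 * skipping over vma->vm_pgoff pages and stopping once the VMA is full.
 */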
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                            struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

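/*
 * Map the buffer into an IOMMU domain: reserve iova_length bytes of
 * IOVA space from the given domain/partition, map the buffer's
 * scatterlist at the start of it, and hand any extra padding beyond
 * buffer->size to msm_iommu_map_extra().
 */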
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
                             struct ion_iommu_map *data,
                             unsigned int domain_num,
                             unsigned int partition_num,
                             unsigned long align,
                             unsigned long iova_length,
                             unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        int prot = IOMMU_WRITE | IOMMU_READ;
        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        BUG_ON(!msm_use_iommu());

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr,
                              buffer->sg_table->sgl,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              buffer->size);
out:
        return ret;
}

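/*
 * Undo ion_iommu_heap_map_iommu(): unmap the full mapped_size and
 * return the IOVA range to its partition.
 */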
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        BUG_ON(!msm_use_iommu());

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
}

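/*
 * Clean/invalidate/flush the kernel mapping with the dmac_* helpers
 * and, when the heap sits in front of an outer (L2) cache, apply the
 * matching outer-cache operation to every physical page of the buffer.
 */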
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                dmac_flush_range(vaddr, vaddr + length);
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (iommu_heap->has_outer_cache) {
                unsigned long pstart;
                unsigned int i;
                struct ion_iommu_priv_data *data = buffer->priv_virt;
                if (!data)
                        return -ENOMEM;

                for (i = 0; i < data->nrpages; ++i) {
                        pstart = page_to_phys(data->pages[i]);
                        outer_cache_op(pstart, pstart + PAGE_SIZE);
                }
        }
        return 0;
}

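/*
 * The sg_table is built at allocation time, so map_dma simply returns
 * it and unmap_dma has nothing to do.
 */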
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
                                               struct ion_buffer *buffer)
{
        return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
        .allocate = ion_iommu_heap_allocate,
        .free = ion_iommu_heap_free,
        .map_user = ion_iommu_heap_map_user,
        .map_kernel = ion_iommu_heap_map_kernel,
        .unmap_kernel = ion_iommu_heap_unmap_kernel,
        .map_iommu = ion_iommu_heap_map_iommu,
        .unmap_iommu = ion_iommu_heap_unmap_iommu,
        .cache_op = ion_iommu_cache_ops,
        .map_dma = ion_iommu_heap_map_dma,
        .unmap_dma = ion_iommu_heap_unmap_dma,
};

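/*
 * The heap itself only needs to remember whether an outer cache is
 * present; all other state is kept per buffer.
 */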
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_iommu_heap *iommu_heap;

        iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
        if (!iommu_heap)
                return ERR_PTR(-ENOMEM);

        iommu_heap->heap.ops = &iommu_heap_ops;
        iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
        iommu_heap->has_outer_cache = heap_data->has_outer_cache;

        return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
        struct ion_iommu_heap *iommu_heap =
             container_of(heap, struct ion_iommu_heap, heap);

        kfree(iommu_heap);
        iommu_heap = NULL;
}