/*
 * drivers/gpu/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"

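/*
 * ion_heap_map_kernel - map an entire buffer into kernel space
 *
 * Collects every page backing @buffer from its sg_table and maps them
 * contiguously with vmap().  Cached buffers get normal kernel
 * protections; everything else is mapped write-combined.  Returns the
 * kernel virtual address, or NULL if the page array or mapping could
 * not be allocated.
 */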
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/*
	 * Use sg->length rather than sg_dma_len(): the latter is only
	 * valid once the table has been mapped for DMA, which need not
	 * have happened yet.
	 */
	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

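/*
 * ion_heap_unmap_kernel - tear down a mapping created by
 * ion_heap_map_kernel()
 */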
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

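/*
 * ion_heap_map_user - map a buffer into a userspace vma
 *
 * Walks the buffer's sg_table and remaps each entry into @vma with
 * remap_pfn_range().  vma->vm_pgoff is interpreted as a page offset
 * into the buffer, and the mapping stops at vma->vm_end if the buffer
 * is larger than the vma.  Returns 0 on success or the
 * remap_pfn_range() error code on failure.
 */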
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;
		int ret;

		if (offset >= sg->length) {
			/* This entry lies entirely below the mmap offset */
			offset -= sg->length;
			continue;
		} else if (offset) {
			/* The mapping starts partway into this entry */
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

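/*
 * ion_heap_buffer_zero - clear every page of a buffer
 *
 * Rather than mapping the whole buffer at once, a single-page vm area
 * is reserved and each page is mapped, zeroed and unmapped through it
 * in turn, using the same cacheability the buffer was allocated with.
 * This bounds the vmalloc footprint regardless of buffer size.
 */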
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	struct vm_struct *vm_struct;
	int i, j, ret = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/* Reserve a single page of vmalloc space to zero through */
	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct)
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg->length;

		/*
		 * Map, zero and unmap one page at a time so arbitrarily
		 * large buffers never need a large contiguous vmalloc
		 * mapping.
		 */
		for (j = 0; j < len / PAGE_SIZE; j++) {
			struct page *sub_page = page + j;
			struct page **pages = &sub_page;

			ret = map_vm_area(vm_struct, pgprot, &pages);
			if (ret)
				goto end;
			memset(vm_struct->addr, 0, PAGE_SIZE);
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}
	}
end:
	free_vm_area(vm_struct);
	return ret;
}

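/*
 * ion_heap_create - instantiate a heap from platform data
 *
 * Dispatches on heap_data->type to the matching heap constructor and
 * copies the identifying fields (name, id, priv) into the new heap.
 * Returns ERR_PTR(-EINVAL) for an unknown type or a failed
 * constructor, so callers should test the result with IS_ERR().
 * A sketch of a hypothetical caller iterating over platform data:
 *
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */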
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       &heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	heap->priv = heap_data->priv;
	return heap;
}

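/*
 * ion_heap_destroy - counterpart to ion_heap_create()
 *
 * Dispatches on heap->type to the matching destructor; a NULL heap is
 * silently ignored.
 */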
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}