/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

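/*
 * Perform cache maintenance before or after a streaming DMA transfer.
 * Addresses in the uncached P2 segment need no maintenance and are left
 * alone; for cached addresses the D-cache is invalidated, cleaned, or
 * flushed depending on the transfer direction.
 */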
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

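/*
 * Allocate a physically contiguous, page-aligned buffer for DMA.  The
 * allocation is rounded up to a power-of-two number of pages; after
 * split_page() the pages beyond the requested size are handed back to
 * the page allocator, so only PAGE_ALIGN(size) bytes stay reserved.
 */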
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}

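/*
 * Release the pages backing a buffer obtained from __dma_alloc().
 * The excess pages of the original power-of-two block were already
 * returned at allocation time, so only PAGE_ALIGN(size) bytes need
 * to be freed here.
 */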
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

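/*
 * Allocate a coherent DMA buffer and return a CPU pointer into the
 * uncached (P2) segment, so no explicit cache maintenance is needed
 * while the buffer is in use.
 *
 * Typical driver usage (illustrative sketch only; "dev", "len" and the
 * error handling are placeholders, not part of this file):
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with "dma", access "buf" from the CPU ...
 *	dma_free_coherent(dev, len, buf, dma);
 */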
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

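/*
 * Free a buffer obtained from dma_alloc_coherent().  The uncached
 * pointer is translated back to its cached alias before looking up
 * the underlying struct page.
 */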
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

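/*
 * Allocate a DMA buffer and map it write-combining instead of fully
 * uncached: the pages are remapped through __ioremap() with the
 * _PAGE_BUFFER attribute, which lets the CPU buffer and merge
 * consecutive writes to the buffer.
 */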
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

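/*
 * Free a buffer obtained from dma_alloc_writecombine(): tear down the
 * ioremap() mapping, then release the pages found via the DMA handle.
 */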
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);