/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
{
	struct page *page, *end, *free;
	void *ret, *vp;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	ret = page_address(page);
	*handle = virt_to_phys(ret);

	vp = ioremap_nocache(*handle, size);
	if (!vp) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	memset(vp, 0, size);

	/*
	 * We must flush the cache before we pass it on to the device
	 */
	dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);

	page = virt_to_page(ret);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	while (++page < end) {
		/* Free any unused pages */
		if (page >= free) {
			__free_page(page);
		}
	}

	return vp;
}
EXPORT_SYMBOL(consistent_alloc);

/*
 * Free memory originally obtained from consistent_alloc(): tear down the
 * uncached ioremap() mapping and return the backing pages to the page
 * allocator.
 */
void consistent_free(void *vaddr, size_t size, dma_addr_t dma_handle)
{
	struct page *page;
	unsigned long addr;

	addr = (unsigned long)phys_to_virt((unsigned long)dma_handle);
	page = virt_to_page(addr);

	free_pages(addr, get_order(size));

	iounmap(vaddr);
}
EXPORT_SYMBOL(consistent_free);
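
/*
 * Usage sketch (hypothetical, not part of the original file): a driver
 * wanting a small coherent region, say a descriptor ring for an imaginary
 * "foo" device, could pair the two helpers above roughly as below.
 * FOO_RING_BYTES and the variable names are made up for illustration;
 * in practice drivers normally reach this code through the generic
 * dma_alloc_coherent()/dma_free_coherent() wrappers rather than calling
 * it directly.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = consistent_alloc(GFP_KERNEL, FOO_RING_BYTES, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	(hand ring_dma to the device, access ring from the CPU)
 *
 *	consistent_free(ring, FOO_RING_BYTES, ring_dma);
 */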

/*
 * Carry out the cache maintenance needed for a DMA transfer in the given
 * direction. The flush primitives operate on the cached P1 alias of the
 * buffer (except on SH-5, where the address is used directly).
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
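
/*
 * Usage sketch (hypothetical): a driver handing a normally cached buffer
 * "buf" of "len" bytes to a device wants, in effect, the operations below
 * around the transfer. buf and len are illustrative names only; real code
 * should go through the dma_map_single()/dma_sync_*() API rather than
 * calling this routine directly.
 *
 *	CPU has filled buf; write the dirty lines back before the device
 *	reads them:
 *		consistent_sync(buf, len, DMA_TO_DEVICE);
 *
 *	Device has filled buf; discard stale cache lines before the CPU
 *	reads the new data:
 *		consistent_sync(buf, len, DMA_FROM_DEVICE);
 */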