/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
{
	struct page *page, *end, *free;
	void *ret, *vp;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * split_page() turns the high-order allocation into individual
	 * order-0 pages so that the unused tail pages can be returned
	 * to the allocator below.
	 */
	split_page(page, order);

	ret = page_address(page);
	*handle = virt_to_phys(ret);

	/* Map the buffer uncached so CPU and device see coherent data. */
	vp = ioremap_nocache(*handle, size);
	if (!vp) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	memset(vp, 0, size);

	/*
	 * We must flush the cache before we pass it on to the device.
	 */
	dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);

	page = virt_to_page(ret);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	while (++page < end) {
		/* Free any unused pages */
		if (page >= free)
			__free_page(page);
	}

	return vp;
}
EXPORT_SYMBOL(consistent_alloc);

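/*
 * A minimal usage sketch: a driver allocating a DMA-coherent descriptor
 * ring might look like the hypothetical snippet below (NR_DESC and
 * struct my_desc are illustrative names, not part of this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = consistent_alloc(GFP_KERNEL,
 *				NR_DESC * sizeof(struct my_desc), &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * The CPU then accesses the buffer through the uncached pointer `ring`,
 * while `ring_dma` is the physical address handed to the device.
 */
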
void consistent_free(void *vaddr, size_t size, dma_addr_t dma_handle)
{
	unsigned long addr;

	/*
	 * Recover the cached virtual address of the underlying pages from
	 * the DMA handle, hand the pages back to the allocator, and tear
	 * down the uncached mapping set up by consistent_alloc().
	 */
	addr = (unsigned long)phys_to_virt((unsigned long)dma_handle);
	free_pages(addr, get_order(size));

	iounmap(vaddr);
}
EXPORT_SYMBOL(consistent_free);

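/*
 * Continuing the hypothetical ring example from above: teardown passes
 * back the size and DMA handle from the matching consistent_alloc()
 * call, since both are needed to locate and free the pages:
 *
 *	consistent_free(ring, NR_DESC * sizeof(struct my_desc), ring_dma);
 */
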
void consistent_sync(void *vaddr, size_t size, int direction)
{
	/*
	 * The flush routines operate on cached (P1) addresses; SH-5 has
	 * no P1/P2 segments, so the address is used as-is there.
	 */
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
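
/*
 * A minimal usage sketch for streaming-style DMA on an ordinary cached
 * buffer (buf and len are hypothetical):
 *
 *	consistent_sync(buf, len, DMA_TO_DEVICE);	// writeback before the device reads
 *	// ... device DMA in progress ...
 *	consistent_sync(buf, len, DMA_FROM_DEVICE);	// invalidate before the CPU reads
 */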