/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))
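
/*
 * Illustrative example (not part of the original file): OFFSET() masks
 * off the high bits, so OFFSET(0x12345, 0x1000) == 0x345, i.e. the
 * position of the value within its enclosing 4 KiB-aligned block.
 */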

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
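
/*
 * Worked numbers (illustrative, assuming 4 KiB pages and an IO_TLB_SHIFT
 * of 11): each slab is 1 << 11 = 2 KiB, SLABS_PER_PAGE is
 * 1 << (12 - 11) = 2, and IO_TLB_MIN_SLABS is (1 << 20) >> 11 = 512
 * slabs, i.e. the 1MB minimum mentioned above.
 */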

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
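
/*
 * Example of the free-list encoding (illustrative, assuming the usual
 * IO_TLB_SEGSIZE of 128): a freshly initialized list reads 128, 127,
 * ..., 2, 1, then restarts at 128 for the next segment.  io_tlb_list[i]
 * is the number of contiguous free slabs starting at index i, clamped
 * so that no allocation crosses a segment boundary.
 */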

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static struct swiotlb_phys_addr {
	struct page *page;
	unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
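
/*
 * Example boot parameters (illustrative, assuming 2 KiB slabs):
 * "swiotlb=65536" reserves 65536 slabs (128 MB), and
 * "swiotlb=65536,force" additionally forces bouncing even for devices
 * that could reach the memory directly.
 */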

void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}
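
/*
 * Illustrative sketch (not part of this file): a platform whose bus
 * addresses are offset from physical addresses could override the
 * __weak hooks above, assuming a hypothetical BUS_OFFSET constant:
 *
 *	dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
 *	{
 *		return paddr + BUS_OFFSET;
 *	}
 *
 *	phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
 *	{
 *		return baddr - BUS_OFFSET;
 *	}
 */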

static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
{
	return swiotlb_phys_to_bus(virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(address));
}

int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
	return 0;
}

static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
{
	return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;
	dma_addr_t bstart, bend;

	pstart = virt_to_phys(io_tlb_start);
	pend = virt_to_phys(io_tlb_end);

	bstart = swiotlb_phys_to_bus(pstart);
	bend = swiotlb_phys_to_bus(pend);

	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
	       bytes >> 20, io_tlb_start, io_tlb_end);
	if (pstart != bstart || pend != bend)
		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
		       " bus %#llx - %#llx\n",
		       (unsigned long long)pstart,
		       (unsigned long long)pend,
		       (unsigned long long)bstart,
		       (unsigned long long)bend);
	else
		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
		       (unsigned long long)pstart,
		       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs *
					 sizeof(struct swiotlb_phys_addr));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
					get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(
		GFP_KERNEL,
		get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0,
	       io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	swiotlb_print_info(bytes);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr,
		   get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list,
		   get_order(io_tlb_nslabs * sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static inline int range_needs_mapping(void *ptr, size_t size)
{
	return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
}

static int is_swiotlb_buffer(char *addr)
{
	return addr >= io_tlb_start && addr < io_tlb_end;
}

static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
	buffer.page += buffer.offset >> PAGE_SHIFT;
	buffer.offset &= PAGE_SIZE - 1;
	return buffer;
}
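
/*
 * Worked example for swiotlb_bus_to_phys_addr() above (illustrative,
 * assuming 2 KiB slabs): for a bounce address 5 slabs plus 100 bytes
 * past io_tlb_start, index is 5 and the saved swiotlb_phys_addr for
 * slot 5 is advanced by 100 bytes, then normalized back into page plus
 * offset form so the result never has an offset >= PAGE_SIZE.
 */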

static void
__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size,
	      int dir)
{
	if (PageHighMem(buffer.page)) {
		size_t len, bytes;
		char *dev, *host, *kmp;

		len = size;
		while (len != 0) {
			unsigned long flags;

			bytes = len;
			if ((bytes + buffer.offset) > PAGE_SIZE)
				bytes = PAGE_SIZE - buffer.offset;
			local_irq_save(flags); /* protects KM_BOUNCE_READ */
			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
			dev  = dma_addr + size - len;
			host = kmp + buffer.offset;
			if (dir == DMA_FROM_DEVICE)
				memcpy(host, dev, bytes);
			else
				memcpy(dev, host, bytes);
			kunmap_atomic(kmp, KM_BOUNCE_READ);
			local_irq_restore(flags);
			len -= bytes;
			buffer.page++;
			buffer.offset = 0;
		}
	} else {
		void *v = page_address(buffer.page) + buffer.offset;

		if (dir == DMA_TO_DEVICE)
			memcpy(dma_addr, v, size);
		else
			memcpy(v, dma_addr, size);
	}
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size,
	   int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;
	struct swiotlb_phys_addr slot_buf;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
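
	/*
	 * Worked example of the overflow guard (illustrative): if mask is
	 * ~0UL then mask + 1 wraps to 0, and the ALIGN expression would
	 * compute 0 slots; the fallback of
	 * 1UL << (BITS_PER_LONG - IO_TLB_SHIFT) is the slot count covering
	 * the entire address space instead.
	 */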

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
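
	/*
	 * Example (illustrative, assuming 4 KiB pages and 2 KiB slabs): a
	 * 3 KiB mapping gets nslots = 2 with stride = 1, while an 8 KiB
	 * mapping gets nslots = 4 with stride = SLABS_PER_PAGE = 2, so the
	 * bounce buffer it lands in starts page-aligned.
	 */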

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1;
			     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
			     io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	slot_buf = buffer;
	for (i = 0; i < nslots; i++) {
		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
		slot_buf.offset &= PAGE_SIZE - 1;
		io_tlb_orig_addr[index+i] = slot_buf;
		slot_buf.offset += 1 << IO_TLB_SHIFT;
	}
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

	return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		/*
		 * bounce... copy the data back into the original buffer
		 * and delete the bounce buffer.
		 */
		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
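	/*
	 * Worked example of the merge (illustrative): freeing slots 5..6
	 * when io_tlb_list[7] already reads 3 yields io_tlb_list[6] = 4 and
	 * io_tlb_list[5] = 5; step 2 below then walks backwards through any
	 * free slots preceding index 5, rewriting their counts to include
	 * the newly freed run.
	 */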
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1;
		     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
		     io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_32BIT_MASK;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
		struct swiotlb_phys_addr buffer;
		buffer.page = virt_to_page(NULL);
		buffer.offset = 0;
		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
		if (!ret)
			return NULL;
	}

	memset(ret, 0, size);
	dev_addr = swiotlb_virt_to_bus(ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);

		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
		return NULL;
	}
	*dma_handle = dev_addr;
	return ret;
}

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(vaddr))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error() (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
{
	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
	void *map;
	struct swiotlb_phys_addr buffer;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr, size) &&
	    !range_needs_mapping(ptr, size))
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	buffer.page   = virt_to_page(ptr);
	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
	map = map_single(hwdev, buffer, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = swiotlb_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);

dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
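
/*
 * Typical driver-side usage (illustrative sketch only; a real caller
 * picks the direction matching its transfer):
 *
 *	dma_addr_t bus_addr;
 *
 *	bus_addr = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(dev, bus_addr))
 *		return -ENOMEM;
 *	... program the device with bus_addr and run the transfer ...
 *	swiotlb_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
 */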

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
			   size_t size, int dir, struct dma_attrs *attrs)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);

void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (is_swiotlb_buffer(dma_addr))
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}

void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
			    struct dma_attrs *);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	struct swiotlb_phys_addr buffer;
	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		dev_addr = swiotlb_sg_to_bus(sg);
		if (range_needs_mapping(sg_virt(sg), sg->length) ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
			void *map;
			buffer.page   = sg_page(sg);
			buffer.offset = sg->offset;
			map = map_single(hwdev, buffer, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = swiotlb_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, int dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_sg_to_bus(sg))
			unmap_single(hwdev,
				     swiotlb_bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address),
				       sg->dma_length);
	}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != swiotlb_sg_to_bus(sg))
			sync_single(hwdev,
				    swiotlb_bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address),
				       sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

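/*
 * Implementation note (illustrative): the overflow buffer doubles as the
 * error sentinel.  A map that could not find bounce space hands back the
 * bus address of io_tlb_overflow_buffer, which is exactly the value the
 * predicate below reports as a mapping failure.
 */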
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);