/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

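/*
 * Translate between the guest's pseudo-physical addresses and machine (bus)
 * addresses using the PFN<->MFN lookup described in the header comment.
 */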
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

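/*
 * Check whether the machine frames backing the given PFN range are
 * contiguous, i.e. whether each successive PFN maps to the next MFN.
 */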
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

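/*
 * Swap the pages backing the bounce buffer for machine-contiguous memory,
 * one chunk of at most IO_TLB_SEGSIZE slabs at a time. The request starts
 * at the narrowest address width that can hold a chunk and is widened one
 * bit at a time (up to max_dma_bits) until the hypervisor can satisfy it.
 */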
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)buf + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
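
/*
 * If no slab count was handed in, default to a 64MB IO TLB (rounded up to a
 * multiple of IO_TLB_SEGSIZE); otherwise use the requested count. Returns
 * the resulting IO TLB size in bytes.
 */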
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
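
/*
 * Set up the Xen software IO TLB: allocate the buffer (from bootmem when
 * "early", from the page allocator otherwise), exchange its backing pages
 * for machine-contiguous memory under 4GB via xen_swiotlb_fixup(), and
 * register it with the generic swiotlb code. On failure the requested size
 * is halved (down to a 2MB minimum) and the sequence is retried a few times.
 */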
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB "
                                "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
                       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        unsigned long vstart;
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        vstart = __get_free_pages(flags, order);
        ret = (void *)vstart;

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(vstart, order,
                                                 fls64(dma_mask)) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                *dma_handle = virt_to_machine(ret).maddr;
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(vaddr);

        if ((dev_addr + size - 1 > dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region((unsigned long)vaddr, order);

        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the DMA mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

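/*
 * A zero bus address is used as the error value (see xen_swiotlb_map_page()),
 * so any non-zero handle is considered a successful mapping.
 */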
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);