#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

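/*
 * Pick GFP flags for the swiotlb bounce pages: if any memblock region
 * starts below the 32-bit boundary, allocate from ZONE_DMA (__GFP_DMA)
 * so the pages come from memory that narrow DMA masks can reach.
 */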
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

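/*
 * The buffer may live in highmem or in foreign (grant-mapped) pages, so
 * cache maintenance is requested from Xen via GNTTABOP_cache_flush by
 * bus address rather than performed locally by virtual address. The
 * range is walked one Xen-sized page at a time.
 */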
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long xen_pfn;
	size_t left = size;

	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	offset %= XEN_PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > XEN_PAGE_SIZE)
			len = XEN_PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		xen_pfn++;
		left -= len;
	} while (left);
}

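/*
 * Split the bus address into a page-aligned base and an offset, then
 * invalidate (unmap path) or clean/invalidate (map path) the range.
 */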
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

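/*
 * Entry points used by swiotlb-xen on map/unmap/sync. Cache maintenance
 * is skipped for coherent devices and, on the map/unmap paths, when the
 * caller passed DMA_ATTR_SKIP_CPU_SYNC.
 */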
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible for it to contain a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary() already checks
	 * whether the buffer is physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!is_device_dma_coherent(dev));
}

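/*
 * On ARM, dom0's pseudo-physical address space is mapped 1:1 onto the
 * machine address space, so a guest-physically contiguous buffer is
 * already machine contiguous: no remapping is needed, just return the
 * physical address as the DMA handle.
 */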
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

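/*
 * Published DMA ops pointer; xen_mm_init() points it at the swiotlb-xen
 * implementation below when running as the initial domain.
 */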
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

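/*
 * Initialise swiotlb-xen for the initial domain and probe for the cache
 * flush hypercall by issuing a zero-length GNTTABOP_cache_flush: any
 * result other than -ENOSYS means the hypervisor implements it.
 */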
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);