/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also for providing a mechanism to have contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * together from different pools, which means there is no guarantee that
 * PFN==MFN and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode)
 * are allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
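
/*
 * For illustration only (nothing below depends on this snippet): the
 * PFN <-> MFN translation described above is available through the
 * pfn_to_mfn()/mfn_to_pfn() helpers pulled in via <xen/page.h>, e.g.
 *
 *        unsigned long mfn  = pfn_to_mfn(pfn);   (guest PFN -> machine MFN)
 *        unsigned long gpfn = mfn_to_pfn(mfn);   (machine MFN -> guest PFN)
 *
 * and on a PV guest there is no guarantee that mfn == pfn, nor that
 * pfn_to_mfn(pfn + 1) == mfn + 1.
 */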

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by
 * this API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

u64 start_dma_addr;

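/*
 * Translate between the guest's pseudo-physical addresses and the
 * machine (bus) addresses a device must actually be programmed with.
 */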
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

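/*
 * Check whether the machine frames backing the pages covered by
 * (offset, length) starting at @pfn are contiguous, i.e. whether each
 * successive PFN maps to the next MFN.
 */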
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

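/*
 * Returns 1 if the buffer crosses a page boundary and the underlying
 * machine frames are not contiguous; such a buffer cannot be handed to
 * a device directly and has to be bounced.
 */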
static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

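/* Does this bus (machine) address fall inside our bounce buffer? */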
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

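/*
 * Exchange the pages backing the IO TLB, one IO_TLB_SEGSIZE-sized chunk
 * at a time, for machine-contiguous pages that are addressable within
 * dma_bits, widening the mask up to max_dma_bits if Xen cannot satisfy
 * the narrower request.
 */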
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)buf + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
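
/*
 * Pick the number of IO TLB slabs: honour a non-zero count passed in by
 * the caller (as obtained from swiotlb_nr_tbl()), otherwise default to
 * a 64MB buffer.  Returns the resulting size in bytes.
 */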
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "Either you do not have the required permissions, there"
                       " is not enough free memory under 4GB, or the hypervisor"
                       " memory is too fragmented!";
        default:
                break;
        }
        return "";
}
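/*
 * Allocate the Xen-SWIOTLB buffer (from bootmem when @early, from the
 * page allocator otherwise), exchange its backing pages for machine-
 * contiguous, DMA-addressable ones via xen_swiotlb_fixup(), and hand
 * the result to the generic swiotlb code.  On failure the size is
 * halved and the whole sequence retried a few times before giving up.
 */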
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB "
                                "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early)
                swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
        else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
                       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
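
/*
 * Allocate a coherent DMA buffer.  The pages come from the normal page
 * allocator; if their machine addresses are not already contiguous and
 * below the device's coherent DMA mask, they are exchanged with Xen via
 * xen_create_contiguous_region() before the bus address is handed back.
 */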
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        unsigned long vstart;
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        vstart = __get_free_pages(flags, order);
        ret = (void *)vstart;

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(vstart, order,
                                                 fls64(dma_mask)) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                *dma_handle = virt_to_machine(ret).maddr;
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

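/*
 * Free a buffer obtained from xen_swiotlb_alloc_coherent(), undoing the
 * contiguous-region exchange when the allocation needed one.
 */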
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(vaddr);

        if ((dev_addr + size - 1 > dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region((unsigned long)vaddr, order);

        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);
        void *map;

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (!map)
                return DMA_ERROR_CODE;

        dev_addr = xen_virt_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble.
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is a no-op on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
                                        target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        void *map = swiotlb_tbl_map_single(hwdev,
                                                           start_dma_addr,
                                                           sg_phys(sg),
                                                           sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                   enum dma_data_direction dir)
{
        return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir)
{
        xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

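/*
 * xen_swiotlb_map_page() clears the returned address to 0 when it cannot
 * set up a DMA-capable mapping, so a zero bus address is the error marker.
 */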
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);