/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa,
 * and also a mechanism for obtaining contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. Linux
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * together from different pools, which means there is no guarantee that
 * PFN==MFN and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode)
 * are allocated in descending order (high to low), meaning the guest
 * might never get any MFNs under the 4GB mark.
 */
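/*
 * Illustrative example (the values here are made up): a guest PFN of
 * 0x10000 may be backed by MFN 0x80321 while PFN 0x10001 is backed by a
 * completely unrelated MFN.  The pfn_to_mfn()/mfn_to_local_pfn() helpers
 * used below look up these translations in the P2M/M2P tables maintained
 * by the Xen MMU code, and xen_create_contiguous_region() asks the
 * hypervisor to swap a range of pages for ones that are machine-contiguous
 * and below a given address boundary.
 */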

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

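/*
 * Defining CREATE_TRACE_POINTS before including the trace header below
 * instantiates the swiotlb tracepoints (e.g. trace_swiotlb_bounced(),
 * used in xen_swiotlb_map_page()) in this translation unit.
 */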
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
 * by this API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

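/*
 * Address translation helpers: convert between guest (pseudo-)physical
 * addresses and bus (machine) addresses via the PFN <-> MFN mappings.
 * The offset within the page is preserved by the conversion.
 */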
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

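/*
 * Check whether the machine frames backing the pages that cover
 * [offset, offset + length) starting at @pfn are consecutive, i.e.
 * whether the buffer is machine-contiguous.
 */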
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

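/*
 * A buffer "straddles" a page boundary if it spans more than one page
 * and the underlying machine frames are not contiguous; such a buffer
 * cannot be handed to a device as a single DMA region.
 */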
static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

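/*
 * Is this bus address backed by our bounce buffer?  Only addresses that
 * translate to a PFN inside this domain can be compared against the
 * [xen_io_tlb_start, xen_io_tlb_end) range.
 */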
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

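/*
 * Exchange the IO TLB buffer, one IO_TLB_SEGSIZE-sized chunk at a time,
 * for memory that is machine-contiguous and addressable within dma_bits.
 * If the hypervisor cannot satisfy a request, the address restriction is
 * relaxed one bit at a time up to max_dma_bits before giving up.
 */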
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)buf + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}

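/*
 * Pick the number of IO TLB slabs: use the caller-supplied value, or
 * default to 64MB worth of slabs, aligned to IO_TLB_SEGSIZE, when none
 * was configured.  Returns the resulting buffer size in bytes.
 */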
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
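
/*
 * Allocate the bounce buffer (from bootmem when called early in boot,
 * from the page allocator otherwise), swap it for machine-contiguous
 * low memory via xen_swiotlb_fixup(), and hand it to the core swiotlb
 * code.  On failure the requested size is halved and the whole sequence
 * is retried a few times before giving up.
 */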
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
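
/*
 * Allocate coherent memory for a device.  If the machine address of the
 * allocation already fits within the device's coherent DMA mask and is
 * machine-contiguous, it is used as-is; otherwise the underlying frames
 * are exchanged with the hypervisor for suitable ones.
 */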
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        unsigned long vstart;
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        vstart = __get_free_pages(flags, order);
        ret = (void *)vstart;

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(vstart, order,
                                                 fls64(dma_mask)) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                *dma_handle = virt_to_machine(ret).maddr;
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

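/*
 * Free memory obtained from xen_swiotlb_alloc_coherent().  If the region
 * had been exchanged for machine-contiguous low memory, give it back to
 * the hypervisor before releasing the pages.
 */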
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(vaddr);

        if ((dev_addr + size - 1 > dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region((unsigned long)vaddr, order);

        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is a no-op on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

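/*
 * A zero bus address is treated as a mapping failure; the map paths above
 * set the returned address to 0 when a bounce buffer cannot be obtained
 * or cannot be made addressable by the device.
 */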
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);