/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device drivers operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

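/*
 * x86 provides its own dma_alloc_coherent_mask() helper; on other
 * architectures fall back to a local copy that derives the coherent
 * allocation mask from the device, defaulting to 24 or 32 bits
 * depending on GFP_DMA.
 */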
#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
                                             gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

        dma |= paddr & ~XEN_PAGE_MASK;

        return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
        phys_addr_t paddr = dma;

        paddr |= baddr & ~XEN_PAGE_MASK;

        return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

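/*
 * Check whether the Xen pages covering [offset, offset + length) in the
 * region starting at guest frame @xen_pfn are backed by consecutive
 * machine (bus) frames, i.e. whether the buffer is machine-contiguous.
 */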
static int check_pages_physically_contiguous(unsigned long xen_pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_bfn;
        int i;
        int nr_pages;

        next_bfn = pfn_to_bfn(xen_pfn);
        nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 0;
        }
        return 1;
}

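/*
 * Return 1 if the buffer at physical address @p of @size bytes spans more
 * than one Xen page and the backing machine frames are not contiguous
 * (so it would need bouncing); return 0 otherwise.
 */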
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long xen_pfn = XEN_PFN_DOWN(p);
        unsigned int offset = p & ~XEN_PAGE_MASK;

        if (offset + size <= XEN_PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(xen_pfn, offset, size))
                return 0;
        return 1;
}

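/*
 * Check whether a bus (DMA) address points into the Xen software IO TLB
 * (bounce buffer) owned by this domain, so the unmap/sync paths know
 * whether data has to be copied back out of the bounce buffer.
 */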
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_addr);
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr))) {
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

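/*
 * Exchange the pseudo-physical pages backing the bounce buffer for
 * machine-contiguous regions below the DMA address limit, one
 * IO_TLB_SEGSIZE-sized chunk at a time, retrying with a wider mask
 * (up to max_dma_bits) if Xen cannot satisfy the request.
 */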
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
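
/*
 * Pick the number of IO TLB slabs: use the caller-provided value if
 * non-zero, otherwise default to a 64MB buffer rounded up to
 * IO_TLB_SEGSIZE. Returns the resulting buffer size in bytes.
 */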
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
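
/*
 * Allocate the swiotlb bounce buffer (from bootmem when @early, from the
 * page allocator otherwise), exchange it for machine-contiguous memory via
 * xen_swiotlb_fixup() and register it with the core swiotlb code. On
 * failure the buffer size is halved and the allocation retried a few
 * times before giving up.
 */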
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
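
/*
 * Allocate coherent memory for DMA. The pages come from
 * xen_alloc_coherent_pages(); if the resulting machine address does not
 * fit the device's coherent DMA mask or is not machine-contiguous, the
 * region is exchanged with Xen for one that is.
 */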
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        /* At this point dma_handle is the physical address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = *dma_handle;
        dev_addr = xen_phys_to_bus(phys);
        if (((dev_addr + size - 1 <= dma_mask)) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

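/*
 * Free memory previously obtained from xen_swiotlb_alloc_coherent(),
 * handing any region that was made machine-contiguous at allocation time
 * back to Xen before releasing the pages.
 */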
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* do not use virt_to_phys because on ARM it doesn't return the
         * physical address */
        phys = xen_bus_to_phys(dev_addr);

        if (((dev_addr + size - 1 > dma_mask)) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            (swiotlb_force != SWIOTLB_FORCE)) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                return dev_addr;
        }

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
                                     attrs);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                         dev_addr, map & ~PAGE_MASK, size, dir, attrs);
        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (dma_capable(dev, dev_addr, size))
                return dev_addr;

        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

        return DMA_ERROR_CODE;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            unsigned long attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (target == SYNC_FOR_CPU)
                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

        if (target == SYNC_FOR_DEVICE)
                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force == SWIOTLB_FORCE ||
                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir, attrs);
                        if (map == SWIOTLB_MAP_ERROR) {
                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                         dev_addr,
                                         map & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = xen_phys_to_bus(map);
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
                         * by the function. */
                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
                                         dev_addr,
                                         paddr & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

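/*
 * Set the device DMA mask, but only if the device has a DMA mask pointer
 * and the entire bounce buffer is addressable under the requested mask;
 * otherwise return -EIO.
 */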
int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);