/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
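/*
 * Worked example (assuming the usual IO_TLB_SHIFT of 11 from
 * <linux/swiotlb.h>, i.e. 2 KB slabs): SLABS_PER_PAGE is 2 on 4 KB-page
 * systems, and IO_TLB_MIN_SLABS is (1 MB >> 11) = 512 slabs.
 */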

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
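/*
 * Example (kernel command line): "swiotlb=65536,force" asks for 65536
 * slabs -- 128 MB assuming 2 KB slabs (IO_TLB_SHIFT == 11) -- and forces
 * bouncing even for DMA-capable addresses.  The slab count is rounded up
 * to a multiple of IO_TLB_SEGSIZE by the parser above.
 */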

void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
        return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
        return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
        return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
        return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
        return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
}

int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
                                              dma_addr_t addr, size_t size)
{
        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
        return 0;
}

static void swiotlb_print_info(unsigned long bytes)
{
        phys_addr_t pstart, pend;

        pstart = virt_to_phys(io_tlb_start);
        pend = virt_to_phys(io_tlb_end);

        printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
               bytes >> 20, io_tlb_start, io_tlb_end);
        printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
               (unsigned long long)pstart,
               (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /*
         * Get IO TLB memory from the low pages
         */
        io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
}
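/*
 * Architecture setup code is expected to call swiotlb_init() during early
 * boot when bounce buffering may be needed (on x86-64, for instance, this
 * happens from the pci-swiotlb glue); nothing in this file calls it.
 */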

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
        unsigned int order;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
                if (io_tlb_start)
                        break;
                order--;
        }

        if (!io_tlb_start)
                goto cleanup1;

        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
                bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        }
        io_tlb_end = io_tlb_start + bytes;
        memset(io_tlb_start, 0, bytes);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup2;

        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;

        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
                goto cleanup3;

        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                         get_order(io_tlb_overflow));
        if (!io_tlb_overflow_buffer)
                goto cleanup4;

        swiotlb_print_info(bytes);

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_orig_addr,
                   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
        io_tlb_orig_addr = NULL;
cleanup3:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
cleanup2:
        io_tlb_end = NULL;
        free_pages((unsigned long)io_tlb_start, order);
        io_tlb_start = NULL;
cleanup1:
        io_tlb_nslabs = req_nslabs;
        return -ENOMEM;
}
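/*
 * A minimal late-initialization sketch (hypothetical platform code; the
 * 64 MB figure is just an example):
 *
 *      if (swiotlb_late_init_with_default_size(64 << 20))
 *              printk(KERN_WARNING "no bounce buffers available\n");
 *
 * On failure the function restores io_tlb_nslabs and returns -ENOMEM, so
 * the caller can continue without swiotlb support.
 */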

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
        return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}

static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
        return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
        return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(phys);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = phys & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn),
                                             KM_BOUNCE_READ);
                        if (dir == DMA_TO_DEVICE)
                                memcpy(dma_addr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, dma_addr, sz);
                        kunmap_atomic(buffer, KM_BOUNCE_READ);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        dma_addr += sz;
                        offset = 0;
                }
        } else {
                if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, phys_to_virt(phys), size);
                else
                        memcpy(phys_to_virt(phys), dma_addr, size);
        }
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long start_dma_addr;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;

        mask = dma_get_seg_boundary(hwdev);
        start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;

        do {
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        return NULL;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}
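/*
 * Worked example of the free-list accounting above (values assume
 * IO_TLB_SEGSIZE == 128): a fully free segment holds the countdown
 * 128, 127, ..., 2, 1, so io_tlb_list[index] always gives the number of
 * contiguous free slots starting at 'index'.  Allocating nslots zeroes
 * those entries, and the backward loop renumbers the free slots in front
 * of the allocation so their counts again describe the shortened free run.
 */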

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        dma_addr_t dev_addr;
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        ret = (void *)__get_free_pages(flags, order);
        if (ret &&
            !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
                                   size)) {
                /*
                 * The allocated memory isn't reachable by the device.
                 */
                free_pages((unsigned long) ret, order);
                ret = NULL;
        }
        if (!ret) {
                /*
                 * We are either out of memory or the device can't DMA
                 * to GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
                ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                if (!ret)
                        return NULL;
        }

        memset(ret, 0, size);
        dev_addr = swiotlb_virt_to_bus(hwdev, ret);

        /* Confirm address can be DMA'd by device */
        if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)dma_mask,
                       (unsigned long long)dev_addr);

                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                return NULL;
        }
        *dma_handle = dev_addr;
        return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
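/*
 * Usage sketch (hypothetical driver code; 'dev' and the 4 KB size are
 * placeholders):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = swiotlb_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
 *
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      swiotlb_free_coherent(dev, 4096, ring, ring_dma);
 */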

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dma_handle)
{
        WARN_ON(irqs_disabled());
        if (!is_swiotlb_buffer(vaddr))
                free_pages((unsigned long) vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation.  This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Memory would be corrupted\n");
                if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Random memory would be DMAed\n");
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or one of the swiotlb_sync_single_* calls is
 * performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
        void *map;

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!address_needs_mapping(dev, dev_addr, size) &&
            !range_needs_mapping(phys, size))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = map_single(dev, phys, size, dir);
        if (!map) {
                swiotlb_full(dev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = swiotlb_virt_to_bus(dev, map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (address_needs_mapping(dev, dev_addr, size))
                panic("map_single: bounce buffer is not DMA'ble");

        return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
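/*
 * Usage sketch (hypothetical driver code; 'dev', 'page' and 'len' are
 * placeholders).  Mapping failures show up as the overflow buffer's
 * address, so callers should check with swiotlb_dma_mapping_error():
 *
 *      dma_addr_t busaddr = swiotlb_map_page(dev, page, 0, len,
 *                                            DMA_TO_DEVICE, NULL);
 *      if (swiotlb_dma_mapping_error(dev, busaddr))
 *              return -EIO;
 *      ... hand busaddr to the hardware ...
 *      swiotlb_unmap_page(dev, busaddr, len, DMA_TO_DEVICE, NULL);
 */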
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682
683/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 * Unmap a single streaming mode DMA translation. The dma_addr and size must
Becky Bruceceb5ac32009-04-08 09:09:15 -0500685 * match what was provided for in a previous swiotlb_map_page call. All
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 * other usages are undefined.
687 *
688 * After this call, reads by the cpu to the buffer are guaranteed to see
689 * whatever the device wrote there.
690 */
Becky Bruce7fcebbd2009-04-08 09:09:19 -0500691static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
692 size_t size, int dir)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693{
Becky Bruce42d7c5e2009-04-08 09:09:21 -0500694 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
Eric Sesterhenn34814542006-03-24 18:47:11 +0100696 BUG_ON(dir == DMA_NONE);
Becky Bruce7fcebbd2009-04-08 09:09:19 -0500697
698 if (is_swiotlb_buffer(dma_addr)) {
699 do_unmap_single(hwdev, dma_addr, size, dir);
700 return;
701 }
702
703 if (dir != DMA_FROM_DEVICE)
704 return;
705
706 dma_mark_clean(dma_addr, size);
707}
708
709void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
710 size_t size, enum dma_data_direction dir,
711 struct dma_attrs *attrs)
712{
713 unmap_single(hwdev, dev_addr, size, dir);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714}
FUJITA Tomonorif98eee82009-01-05 23:59:03 +0900715EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
FUJITA Tomonori874d6a92008-12-28 15:02:07 +0900716
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
{
        char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(dma_addr)) {
                sync_single(hwdev, dma_addr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
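/*
 * Usage sketch for a long-lived DMA_FROM_DEVICE mapping that the CPU
 * inspects between transfers (hypothetical driver code):
 *
 *      swiotlb_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *      ... examine the data the device wrote ...
 *      swiotlb_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *      ... device may DMA into the buffer again ...
 */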

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
{
        swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

                if (range_needs_mapping(paddr, sg->length) ||
                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, sg_phys(sg),
                                               sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                       attrs);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               int dir)
{
        return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
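/*
 * Usage sketch (hypothetical driver code; 'sgl' holds 'nents' entries
 * prepared with sg_init_table()/sg_set_page()).  A return of 0 means the
 * mapping failed and nothing is left mapped:
 *
 *      int count = swiotlb_map_sg_attrs(dev, sgl, nents,
 *                                       DMA_FROM_DEVICE, NULL);
 *      if (!count)
 *              return -EIO;
 *      ... program the device with sg_dma_address()/sg_dma_length() ...
 *      swiotlb_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, NULL);
 */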

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 int dir)
{
        return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, int dir, int target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                swiotlb_sync_single(hwdev, sg->dma_address,
                                    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);