/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

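/*
 * Translation tables come from two kmem caches: one for region and segment
 * tables, one for page tables. All entries start out invalid and protected
 * until a translation is established.
 */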
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

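/*
 * Walk the translation tables from the region table at rto down to the page
 * table and return a pointer to the entry covering dma_addr. Missing segment
 * and page tables are allocated on demand; NULL is returned if an allocation
 * fails.
 */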
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

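/*
 * Establish or invalidate the translation entries for the range starting at
 * dma_addr and, where the device or mapping mode requires it, refresh the
 * device translations via rpcit.
 */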
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table)
		goto no_refresh;

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
				       unsigned long start, int size)
{
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

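/*
 * Allocate a range of iommu pages from the bitmap. The search starts at
 * next_bit and wraps around to the start of the range at most once; with
 * lazy unmap, a wrap-around triggers a global translation refresh before
 * addresses may be re-used.
 */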
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;
	int wrap = 0;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(zdev, 0, size);
		wrap = 1;
	}

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

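/*
 * Map a page range: reserve an iommu address range, establish the
 * translation entries for it and return the resulting bus address.
 * DMA_NONE and DMA_TO_DEVICE mappings are marked protected, i.e. the
 * device may not write to them.
 */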
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma)
		goto out_free;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, &zdev->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_hex(&pa, sizeof(pa));
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
		zpci_err("unmap error:\n");
		zpci_err_hex(&dma_addr, sizeof(dma_addr));
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

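/*
 * Per-device setup: allocate the root translation table, size the iommu
 * address range according to the memory size (high_memory), allocate the
 * iommu bitmap and register the translation table with the device (ioat).
 */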
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);

static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);