/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

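/*
 * DMA address translation uses a three-level table hierarchy: a region
 * table whose entries point to segment tables, whose entries in turn
 * point to page tables (see dma_walk_cpu_trans()). Region and segment
 * tables share one slab cache, page tables use a second one.
 */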
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

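/*
 * Allocate a region or segment table with all entries set to invalid
 * and protected. GFP_ATOMIC is used because callers may hold
 * zdev->dma_table_lock with interrupts disabled.
 */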
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

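/*
 * Return the segment table that a region table entry points to,
 * allocating and validating a new one if the entry is still invalid.
 * Returns NULL if the allocation fails.
 */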
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

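/*
 * Walk the table hierarchy for a DMA address: the region table index
 * (rtx) selects a segment table, the segment index (sx) selects a page
 * table, and the page index (px) selects the final page table entry.
 * Intermediate tables are allocated on demand.
 */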
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

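/*
 * Update a single page table entry: either invalidate it, or point it
 * at page_addr and set or clear the protection bit according to flags.
 */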
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}
	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

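/*
 * Establish or invalidate the translations for nr_pages consecutive
 * pages starting at pa/dma_addr, then refresh the device TLB via
 * rpcit where the hardware requires it (see below).
 */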
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when
	 * previously invalid translation-table entries are validated;
	 * it is required, however, when altering previously valid
	 * entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed
		 * INVALID, and not only for one page but for the whole
		 * range... for now we WARN_ON in that case but with lazy
		 * unmap that needs to be redone!
		 */
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

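/* Free all page tables reachable from a valid region table entry. */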
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

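/*
 * IOMMU address space management: __dma_alloc_iommu() searches the
 * allocation bitmap for a free range of size pages, dma_alloc_iommu()
 * adds a next-fit strategy on top of it, retrying from the start of
 * the bitmap once the search from zdev->next_bit comes up empty.
 */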
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				       int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

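/*
 * Map a page range for DMA: allocate an IOMMU address range, install
 * the translations and return the device-visible address. For
 * directions where the device must not write to memory (DMA_NONE,
 * DMA_TO_DEVICE) the mapping is created with the protection bit set.
 */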
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

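/*
 * Coherent allocations are ordinary pages: allocate and zero them,
 * then map them DMA_BIDIRECTIONAL. The CPU pointer returned is the
 * physical address of the buffer, which works here since the kernel
 * accesses it through the identity mapping (cf. the memset() below).
 */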
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

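/*
 * Map a scatterlist by mapping each element individually; if any
 * element fails to map, everything mapped so far is unmapped again
 * and 0 is returned.
 */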
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

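/*
 * Set up the per-device DMA state: the root translation table, the
 * IOMMU allocation bitmap (one bit per page of the usable DMA address
 * range) and the I/O address translation registration that points the
 * device at the table.
 */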
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);