/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

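/*
 * Allocate a region table from dma_region_table_cache and initialize
 * every entry as invalid and protected. GFP_ATOMIC is used because
 * tables may have to be allocated while a mapping is being set up
 * under a spinlock.
 */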
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

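/*
 * Return the segment-table origin stored in a region-table entry,
 * allocating and validating a new segment table if the entry is not
 * yet valid. Returns NULL if the allocation fails.
 */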
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

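/*
 * Walk the CPU-maintained translation tables for @dma_addr through the
 * region-table, segment-table and page-table levels, creating missing
 * lower-level tables on the way, and return a pointer to the page-table
 * entry (or NULL if a table could not be allocated).
 */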
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

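/*
 * Update the page-table entry for one page: either invalidate it, or
 * point it at @page_addr and validate it and set or clear the
 * protection bit according to @flags.
 */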
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

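/*
 * Establish or invalidate the translation-table entries for a range of
 * @size bytes starting at @dma_addr, and refresh the hardware's view of
 * the tables via s390pci_refresh_trans() when that is required (see the
 * comment on rpcit below). All updates run under zdev->dma_table_lock.
 */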
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when
	 * previously invalid translation-table entries are validated;
	 * however, it is required when altering previously valid entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed INVALID,
		 * and not only for one page but for the whole range...
		 * -> now we WARN_ON in that case but with lazy unmap that
		 * needs to be redone!
		 */
		goto no_refresh;

	rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				   nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

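/*
 * Free all page tables reachable from a valid region-table entry, then
 * free the segment table itself.
 */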
static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

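/*
 * Simple next-fit allocator for DMA address space: scan the IOMMU
 * bitmap for a free range of @size pages, first from the last
 * allocation position (zdev->next_bit) and, failing that, from the
 * start. Returns the page index of the range or -1 on failure.
 */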
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				       int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

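/*
 * Map a contiguous range for DMA: reserve enough pages in the IOMMU
 * bitmap, enter the translations into the DMA table and return the
 * resulting bus address. For DMA_NONE and DMA_TO_DEVICE the entries
 * are created with the protection bit set. Returns DMA_ERROR_CODE on
 * failure.
 */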
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	WARN_ON_ONCE(offset > PAGE_SIZE);

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + offset;
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

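/*
 * Undo a mapping created by s390_dma_map_pages(): invalidate the
 * translation-table entries and release the pages in the IOMMU bitmap.
 */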
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

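/*
 * Allocate coherent DMA memory: get pages, zero them and map them with
 * s390_dma_map_pages(). The physical address doubles as the returned
 * kernel pointer here; the bus address is returned via @dma_handle.
 */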
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

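/*
 * Map a scatterlist element by element via s390_dma_map_pages(). If
 * any element fails to map, all previously mapped elements are
 * unmapped again and 0 is returned.
 */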
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

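/*
 * Set up DMA translation for one PCI device: allocate the root DMA
 * table and an allocation bitmap with one bit per page of the DMA
 * window (iommu_size, derived from the amount of memory), then
 * register the table with the device via zpci_register_ioat().
 */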
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	unsigned int bitmap_order;
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	bitmap_order = get_order(zdev->iommu_pages / 8);
	pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
		zdev->iommu_size, zdev->iommu_pages, bitmap_order);

	zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       bitmap_order);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

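/*
 * Tear down DMA translation for a device: unregister the DMA table,
 * free all translation tables and release the IOMMU bitmap.
 */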
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	free_pages((unsigned long) zdev->iommu_bitmap,
		   get_order(zdev->iommu_pages / 8));
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);