/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

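/*
 * Allocate a region/segment translation table from the region table cache
 * and initialize every entry to the invalid state.
 */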
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

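/*
 * Walk the region and segment tables for dma_addr, allocating missing
 * intermediate tables on demand, and return a pointer to the page-table
 * entry for that address (or NULL if a table allocation failed).
 */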
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

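/*
 * Update the CPU-side translation entry for dma_addr: either invalidate it
 * or point it at page_addr and validate it, then set or clear the protection
 * bit according to flags.
 */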
void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
			  dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

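/*
 * Map or unmap a physically contiguous range in the device's DMA translation
 * table and, unless the refresh can be skipped (see below), notify the
 * device of the changed translations via rpcit.
 */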
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table)
		goto no_refresh;

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
				     flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

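/* Free a segment table together with all page tables it still references. */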
void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
				       unsigned long start, int size)
{
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

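/*
 * Allocate 'size' consecutive pages of DMA address space from the iommu
 * bitmap, wrapping around to the start of the bitmap if necessary.
 */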
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;
	int wrap = 0;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(zdev, 0, size);
		wrap = 1;
	}

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

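/*
 * map_page callback: allocate a DMA address range for the buffer, establish
 * the translation-table entries and return the DMA address including the
 * offset into the first page.
 */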
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma)
		goto out_free;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, &zdev->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_hex(&pa, sizeof(pa));
	return DMA_ERROR_CODE;
}

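/*
 * unmap_page callback: invalidate the translation entries and return the
 * DMA address range to the iommu bitmap.
 */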
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_PTE_INVALID)) {
		zpci_err("unmap error:\n");
		zpci_err_hex(&dma_addr, sizeof(dma_addr));
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

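/* Allocate a zeroed, page-aligned buffer and map it for bidirectional DMA. */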
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

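/*
 * Map each scatterlist element individually; on failure unwind the already
 * mapped elements and return 0.
 */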
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

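/*
 * Set up DMA translation for a PCI function: allocate the root translation
 * table and an iommu bitmap sized to cover usable memory, then register the
 * table with the device.
 */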
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

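/* Create the kmem caches used for region/segment tables and page tables. */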
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);

static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);