/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

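/*
 * Usage sketch (illustrative, not part of this file): a typical iommu
 * driver allocates a group, adds the device, and then drops the extra
 * reference taken by iommu_group_alloc(). "my_dev" is a made-up
 * placeholder for the device being grouped:
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *	int ret;
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	ret = iommu_group_add_device(group, my_dev);
 *	iommu_group_put(group);
 *	return ret;
 *
 * After iommu_group_put(), the group stays alive for as long as it has
 * devices or other external references.
 */
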
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_dm_region)
			domain->ops->apply_dm_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

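/*
 * Worked example of the arithmetic above (values are hypothetical): with
 * domain->pgsize_bitmap = SZ_4K | SZ_2M, __ffs() finds bit 12, so
 * pg_size = 4KiB. A direct-mapped region with start = 0xa0000000 and
 * length = 0x1800 is rounded out to the page range
 * [0xa0000000, 0xa0002000) and mapped 1:1 one 4KiB page at a time,
 * skipping any page that already translates to a physical address.
 */
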
/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		__iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

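/*
 * Sketch of a caller-supplied callback for iommu_group_for_each_dev();
 * the callback name and message are invented for illustration. Returning
 * non-zero stops the iteration:
 *
 *	static int dump_group_dev(struct device *dev, void *data)
 *	{
 *		dev_info(dev, "member of group %d\n", *(int *)data);
 *		return 0;
 *	}
 *
 *	int id = iommu_group_id(group);
 *
 *	iommu_group_for_each_dev(group, &id, dump_group_dev);
 */
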
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

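/*
 * Typical reference pattern (sketch): look up a device's group, use it,
 * then drop the reference when done:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		pr_info("device is in group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */
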
/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier. Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

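/*
 * Registration sketch: an iommu driver typically calls bus_set_iommu()
 * once per bus type it can translate, e.g. from an initcall. The ops
 * structure name and the callback names are placeholders:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_iommu_capable,
 *		.domain_alloc	= my_iommu_domain_alloc,
 *		.attach_dev	= my_iommu_attach_dev,
 *		.map		= my_iommu_map,
 *		.unmap		= my_iommu_unmap,
 *	};
 *
 *	bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */
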
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

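/*
 * Handler sketch (names invented for illustration); the signature follows
 * iommu_fault_handler_t. Returning 0 reports the fault as handled, while
 * an error code leaves the driver's default fault reporting in place:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "unhandled fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */
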
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

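/*
 * End-to-end usage sketch (error handling trimmed, addresses arbitrary):
 * allocate an unmanaged domain, attach a device and map a page:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, 0x100000, page_to_phys(page), SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x100000, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
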
void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices. Bridge that gap by
 * iterating over the devices in a group. Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices). Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

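/*
 * Worked example for iommu_pgsize() (hypothetical values): with
 * domain->pgsize_bitmap = SZ_4K | SZ_2M, addr_merge = 0x200000 and
 * size = 0x400000, __fls(size) = 22 and __ffs(addr_merge) = 21, so
 * pgsize_idx = 21. The candidate mask (1UL << 22) - 1 intersected with
 * the bitmap leaves SZ_4K | SZ_2M, and the final __fls() picks the
 * biggest fit: a 2MiB page.
 */
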
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

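/*
 * Caller sketch: iommu_unmap() returns the number of bytes actually
 * unmapped, which may be less than requested if an unmapped hole is
 * hit; callers that care should check it (values illustrative):
 *
 *	size_t unmapped = iommu_unmap(domain, iova, size);
 *
 *	if (unmapped < size)
 *		pr_warn("partial unmap at 0x%lx\n", iova + unmapped);
 */
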
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

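/*
 * Query sketch: reading a domain's aperture via DOMAIN_ATTR_GEOMETRY
 * (illustrative, minimal error handling):
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */
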
int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}