/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the new group
 * in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
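
/*
 * Example (editorial sketch, not part of the original source): how an
 * iommu driver might use the group API above when it discovers a new
 * device.  "dev" and the one-device-per-group policy are assumptions
 * made for illustration; error handling is abbreviated.
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *	int ret;
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	ret = iommu_group_add_device(group, dev);
 *
 *	(drop the extra allocation reference; the group now lives until
 *	 its last device is removed and all other references are gone)
 *	iommu_group_put(group);
 *	return ret;
 */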

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
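
/*
 * Example (editorial sketch, not from the original source): attaching
 * driver-private data to a group.  "struct my_group_info" and
 * "my_info_release" are hypothetical names; the release callback runs
 * when the group is finally freed.
 *
 *	static void my_info_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	struct my_group_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 *
 *	iommu_group_set_iommudata(group, info, my_info_release);
 *	...
 *	info = iommu_group_get_iommudata(group);
 */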

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
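
/*
 * Example (editorial sketch, not from the original source): counting
 * the devices in a group with the iterator above.  "count_device" is a
 * hypothetical callback; a non-zero return value would stop the walk
 * early and be passed back to the caller.
 *
 *	static int count_device(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_device);
 */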

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the
 * group reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
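
/*
 * Example (editorial sketch, not from the original source): watching a
 * group for device additions.  "my_group_notifier" and "my_nb" are
 * hypothetical; the actions delivered here are the IOMMU_GROUP_NOTIFY_*
 * values from include/linux/iommu.h and "data" is the struct device
 * concerned.
 *
 *	static int my_group_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notifier,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */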

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;

	if (!ops->add_device)
		return -ENODEV;

	WARN_ON(dev->iommu_group);

	ops->add_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_bus_nb);
	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus to set the iommu-callbacks for
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
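
/*
 * Example (editorial sketch, not from the original source): an iommu
 * driver registering its callbacks during initialization.  The ops
 * structure and its my_* members are hypothetical; pgsize_bitmap
 * advertises the hardware's supported page sizes to iommu_map() below.
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *	};
 *
 *	bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */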

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
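
/*
 * Example (editorial sketch, not from the original source): installing
 * a fault handler.  "my_fault_handler" is hypothetical; "token" comes
 * back as the last argument on every fault, and returning 0 reports
 * the fault as handled.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return 0;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */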

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
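
/*
 * Example (editorial sketch, not from the original source): the basic
 * domain life cycle for a single device.  "dev" is an assumed device
 * on a bus with iommu_ops set; error handling is abbreviated.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	... use iommu_map()/iommu_unmap() on the domain ...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */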

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	return iommu_group_for_each_dev(group, domain,
					iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
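
/*
 * Example (editorial sketch, not from the original source): attaching
 * an entire group, as device-assignment users such as VFIO do.  The
 * reference taken by iommu_group_get() must be dropped again with
 * iommu_group_put(); error handling is abbreviated.
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (!group)
 *		return -ENODEV;
 *
 *	ret = iommu_attach_group(domain, group);
 *	...
 *	iommu_detach_group(domain, group);
 *	iommu_group_put(group);
 */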

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	iommu_detach_device(domain, dev);

	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)paddr, (unsigned long)size,
		       min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
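
/*
 * Example (editorial sketch, not from the original source): mapping and
 * unmapping one buffer.  The iova, the physical address and the size
 * must all be aligned to the smallest hardware page size; the values
 * below assume a 4K-capable iommu and are illustrative only.
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 */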

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
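
/*
 * Example (editorial sketch, not from the original source): querying
 * the addressable range of a domain through the attribute interface.
 * The aperture fields come from struct iommu_domain_geometry in
 * include/linux/iommu.h.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: 0x%llx - 0x%llx\n",
 *			(u64)geo.aperture_start, (u64)geo.aperture_end);
 */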

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);