blob: 88e44ddf525a5ac9db82ebdb82036c58d41818ea [file] [log] [blame]
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
18
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +020019#define pr_fmt(fmt) "%s: " fmt, __func__
20
Joerg Roedel905d66c2011-09-06 16:03:26 +020021#include <linux/device.h>
Ohad Ben-Cohen40998182011-09-02 13:32:32 -040022#include <linux/kernel.h>
Joerg Roedelfc2100e2008-11-26 17:21:24 +010023#include <linux/bug.h>
24#include <linux/types.h>
Andrew Morton60db4022009-05-06 16:03:07 -070025#include <linux/module.h>
26#include <linux/slab.h>
Joerg Roedelfc2100e2008-11-26 17:21:24 +010027#include <linux/errno.h>
28#include <linux/iommu.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070029#include <linux/scatterlist.h>
Joerg Roedelfc2100e2008-11-26 17:21:24 +010030
Alex Williamson14604322011-10-21 15:56:05 -040031static ssize_t show_iommu_group(struct device *dev,
32 struct device_attribute *attr, char *buf)
33{
34 unsigned int groupid;
35
36 if (iommu_device_group(dev, &groupid))
37 return 0;
38
39 return sprintf(buf, "%u", groupid);
40}
41static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
42
43static int add_iommu_group(struct device *dev, void *data)
44{
45 unsigned int groupid;
46
47 if (iommu_device_group(dev, &groupid) == 0)
48 return device_create_file(dev, &dev_attr_iommu_group);
49
50 return 0;
51}
52
53static int remove_iommu_group(struct device *dev)
54{
55 unsigned int groupid;
56
57 if (iommu_device_group(dev, &groupid) == 0)
58 device_remove_file(dev, &dev_attr_iommu_group);
59
60 return 0;
61}
62
63static int iommu_device_notifier(struct notifier_block *nb,
64 unsigned long action, void *data)
65{
66 struct device *dev = data;
67
68 if (action == BUS_NOTIFY_ADD_DEVICE)
69 return add_iommu_group(dev, NULL);
70 else if (action == BUS_NOTIFY_DEL_DEVICE)
71 return remove_iommu_group(dev);
72
73 return 0;
74}
75
/* Notifier block registered on each IOMMU-enabled bus by iommu_bus_init();
 * keeps the per-device "iommu_group" sysfs files in sync with hotplug. */
static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};
79
/*
 * Per-bus IOMMU setup, called once from bus_set_iommu(): register the
 * device add/del notifier and create "iommu_group" sysfs files for all
 * devices already present on the bus.
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	/* NOTE(review): bus_register_notifier() return value is ignored;
	 * on failure, later device add/del events would be silently lost. */
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
85
Joerg Roedelff217762011-08-26 16:48:26 +020086/**
87 * bus_set_iommu - set iommu-callbacks for the bus
88 * @bus: bus.
89 * @ops: the callbacks provided by the iommu-driver
90 *
91 * This function is called by an iommu driver to set the iommu methods
92 * used for a particular bus. Drivers for devices on that bus can use
93 * the iommu-api after these ops are registered.
94 * This special function is needed because IOMMUs are usually devices on
95 * the bus itself, so the iommu drivers are not initialized when the bus
96 * is set up. With this function the iommu-driver can set the iommu-ops
97 * afterwards.
98 */
99int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100100{
Joerg Roedelff217762011-08-26 16:48:26 +0200101 if (bus->iommu_ops != NULL)
102 return -EBUSY;
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100103
Joerg Roedelff217762011-08-26 16:48:26 +0200104 bus->iommu_ops = ops;
105
106 /* Do IOMMU specific setup for this bus-type */
107 iommu_bus_init(bus, ops);
108
109 return 0;
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100110}
Joerg Roedelff217762011-08-26 16:48:26 +0200111EXPORT_SYMBOL_GPL(bus_set_iommu);
112
Joerg Roedela1b60c12011-09-06 18:46:34 +0200113bool iommu_present(struct bus_type *bus)
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100114{
Joerg Roedel94441c32011-09-06 18:58:54 +0200115 return bus->iommu_ops != NULL;
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100116}
Joerg Roedela1b60c12011-09-06 18:46:34 +0200117EXPORT_SYMBOL_GPL(iommu_present);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100118
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -0400119/**
120 * iommu_set_fault_handler() - set a fault handler for an iommu domain
121 * @domain: iommu domain
122 * @handler: fault handler
Ohad Ben-Cohend5069532012-05-21 20:20:05 +0300123 * @token: user data, will be passed back to the fault handler
Ohad Ben-Cohen0ed6d2d2011-09-27 07:36:40 -0400124 *
125 * This function should be used by IOMMU users which want to be notified
126 * whenever an IOMMU fault happens.
127 *
128 * The fault handler itself should return 0 on success, and an appropriate
129 * error code otherwise.
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -0400130 */
131void iommu_set_fault_handler(struct iommu_domain *domain,
Ohad Ben-Cohend5069532012-05-21 20:20:05 +0300132 iommu_fault_handler_t handler,
133 void *token)
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -0400134{
135 BUG_ON(!domain);
136
137 domain->handler = handler;
Ohad Ben-Cohend5069532012-05-21 20:20:05 +0300138 domain->handler_token = token;
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -0400139}
Ohad Ben-Cohen30bd9182011-09-26 09:11:46 -0400140EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -0400141
Steve Mucklef132c6c2012-06-06 18:30:57 -0700142struct iommu_domain *iommu_domain_alloc(struct bus_type *bus, int flags)
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100143{
144 struct iommu_domain *domain;
145 int ret;
146
Joerg Roedel94441c32011-09-06 18:58:54 +0200147 if (bus == NULL || bus->iommu_ops == NULL)
Joerg Roedel905d66c2011-09-06 16:03:26 +0200148 return NULL;
149
KyongHo Cho8bd69602011-12-16 21:38:25 +0900150 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100151 if (!domain)
152 return NULL;
153
Joerg Roedel94441c32011-09-06 18:58:54 +0200154 domain->ops = bus->iommu_ops;
Joerg Roedel905d66c2011-09-06 16:03:26 +0200155
Steve Mucklef132c6c2012-06-06 18:30:57 -0700156 ret = domain->ops->domain_init(domain, flags);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100157 if (ret)
158 goto out_free;
159
160 return domain;
161
162out_free:
163 kfree(domain);
164
165 return NULL;
166}
167EXPORT_SYMBOL_GPL(iommu_domain_alloc);
168
169void iommu_domain_free(struct iommu_domain *domain)
170{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200171 if (likely(domain->ops->domain_destroy != NULL))
172 domain->ops->domain_destroy(domain);
173
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100174 kfree(domain);
175}
176EXPORT_SYMBOL_GPL(iommu_domain_free);
177
178int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
179{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200180 if (unlikely(domain->ops->attach_dev == NULL))
181 return -ENODEV;
182
183 return domain->ops->attach_dev(domain, dev);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100184}
185EXPORT_SYMBOL_GPL(iommu_attach_device);
186
187void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
188{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200189 if (unlikely(domain->ops->detach_dev == NULL))
190 return;
191
192 domain->ops->detach_dev(domain, dev);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100193}
194EXPORT_SYMBOL_GPL(iommu_detach_device);
195
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100196phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
197 unsigned long iova)
198{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200199 if (unlikely(domain->ops->iova_to_phys == NULL))
200 return 0;
201
202 return domain->ops->iova_to_phys(domain, iova);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100203}
204EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
Sheng Yangdbb9fd82009-03-18 15:33:06 +0800205
206int iommu_domain_has_cap(struct iommu_domain *domain,
207 unsigned long cap)
208{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200209 if (unlikely(domain->ops->domain_has_cap == NULL))
210 return 0;
211
212 return domain->ops->domain_has_cap(domain, cap);
Sheng Yangdbb9fd82009-03-18 15:33:06 +0800213}
214EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100215
/**
 * iommu_map - map a physically contiguous region into an iommu domain
 * @domain: iommu domain
 * @iova: start of the IO virtual address range to map
 * @paddr: physical address of the region
 * @size: size of the region in bytes
 * @prot: protection flags passed through to the driver's map callback
 *
 * @iova, @paddr and @size must all be aligned to the smallest page size
 * the hardware supports (lowest bit of the driver's pgsize_bitmap).
 * The region is split into the largest page sizes permitted by both the
 * remaining size and the combined alignment of iova/paddr, and each
 * chunk is handed to the driver.  On failure the partial mapping is
 * rolled back via iommu_unmap().  Returns 0 or a negative errno.
 */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;	/* kept for error rollback */
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr:
			 * lowest set bit of (iova | paddr) bounds it */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane: alignment guarantees at
		 * least the minimum supported page size survives */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
292
/**
 * iommu_unmap - unmap a region of IO virtual addresses
 * @domain: iommu domain
 * @iova: start of the range to unmap; must be aligned, together with
 *        @size, to the smallest supported page size
 * @size: number of bytes to unmap
 *
 * Repeatedly calls the driver's unmap callback until at least @size
 * bytes are unmapped or an unmapped hole is hit.  Returns the number of
 * bytes actually unmapped (possibly more than @size if the driver
 * removed a larger page), or -EINVAL on misalignment.
 *
 * NOTE(review): -ENODEV is returned through a size_t and thus appears
 * as a huge positive value to callers — presumably this path is never
 * reached for buses with iommu ops installed; verify callers.
 */
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
			iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
Alex Williamson14604322011-10-21 15:56:05 -0400339
Steve Mucklef132c6c2012-06-06 18:30:57 -0700340int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
341 struct scatterlist *sg, unsigned int len, int prot)
342{
343 if (unlikely(domain->ops->map_range == NULL))
344 return -ENODEV;
345
346 BUG_ON(iova & (~PAGE_MASK));
347
348 return domain->ops->map_range(domain, iova, sg, len, prot);
349}
350EXPORT_SYMBOL_GPL(iommu_map_range);
351
352int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
353 unsigned int len)
354{
355 if (unlikely(domain->ops->unmap_range == NULL))
356 return -ENODEV;
357
358 BUG_ON(iova & (~PAGE_MASK));
359
360 return domain->ops->unmap_range(domain, iova, len);
361}
362EXPORT_SYMBOL_GPL(iommu_unmap_range);
363
364phys_addr_t iommu_get_pt_base_addr(struct iommu_domain *domain)
365{
366 if (unlikely(domain->ops->get_pt_base_addr == NULL))
367 return 0;
368
369 return domain->ops->get_pt_base_addr(domain);
370}
371EXPORT_SYMBOL_GPL(iommu_get_pt_base_addr);
372
Alex Williamson14604322011-10-21 15:56:05 -0400373int iommu_device_group(struct device *dev, unsigned int *groupid)
374{
375 if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
376 return dev->bus->iommu_ops->device_group(dev, groupid);
377
378 return -ENODEV;
379}
380EXPORT_SYMBOL_GPL(iommu_device_group);