/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of remapping devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the I/O devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the I/O devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data
 * structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

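/*
 * Illustration (not part of the driver): a reader following the rules
 * above takes dmar_global_lock in process context, but only an RCU
 * read-side critical section in interrupt context, e.g.:
 *
 *      down_read(&dmar_global_lock);
 *      for_each_drhd_unit(drhd)
 *              ...;
 *      up_read(&dmar_global_lock);
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
 *              ...;
 *      rcu_read_unlock();
 *
 * dmar_find_matched_drhd_unit() below is a real example of the RCU side.
 */
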
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so that scanning the list will
         * find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev __rcu **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        pr_warn("Device scope bus [%d] not found\n", scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
                if (!pdev) {
                        /* warning will be printed below */
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->device, path->function);
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                pr_warn("Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }

        rcu_assign_pointer(*dev, pdev);

        return 0;
}

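/*
 * Worked example (illustrative only): a scope entry for an endpoint at
 * 0000:00:1c.0 -> 01:00.0 carries scope->bus == 0 and two
 * acpi_dmar_pci_path entries, {device 0x1c, function 0} for the root
 * port and {device 0, function 0} for the device behind it; the loop
 * above descends one path entry per bus level via pdev->subordinate.
 */
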
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                struct pci_dev __rcu ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        int index, ret;

        *devices = dmar_alloc_dev_scope(start, end, cnt);
        if (*cnt == 0)
                return 0;
        else if (!*devices)
                return -ENOMEM;

        for (index = 0; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                        &(*devices)[index], segment);
                        if (ret) {
                                dmar_free_dev_scope(devices, cnt);
                                return ret;
                        }
                        index++;
                }
        }

        return 0;
}

void dmar_free_dev_scope(struct pci_dev __rcu ***devices, int *cnt)
{
        int i;
        struct pci_dev *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        pci_dev_put(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        BUG_ON(dev->is_virtfn);

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info for %s.\n",
                                pci_name(dev));
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

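/*
 * Note (illustrative): the walk above visits dev, then each parent
 * bridge, decrementing level as it goes, so path[] ends up ordered
 * root port first and @dev last -- the same order the ACPI device
 * scope entries use, which is what lets dmar_match_pci_path() below
 * compare the two arrays index by index.
 */
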
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                return false;
        if (info->level != count)
                return false;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        return false;
        }

        return true;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct pci_dev __rcu **devices, int devices_cnt)
{
        int i, level;
        struct pci_dev *tmp, *dev = info->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
                    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                rcu_assign_pointer(devices[i],
                                                   pci_dev_get(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct pci_dev __rcu **devices, int count)
{
        int index;
        struct pci_dev *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == info->dev) {
                        rcu_assign_pointer(devices[index], NULL);
                        synchronize_rcu();
                        pci_dev_put(tmp);
                        return 1;
                }

        return 0;
}

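/*
 * Note (illustrative): the unpublish order above is the classic RCU
 * retire pattern -- clear the pointer with rcu_assign_pointer(), wait
 * for all pre-existing readers with synchronize_rcu(), and only then
 * drop the reference with pci_dev_put(), so a concurrent reader can
 * never see a pci_dev whose refcount it no longer owns.
 */
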
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                                          dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};

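/*
 * Note (illustrative): dmar_pci_bus_nb is hooked up in
 * dmar_dev_scope_init() below via
 *
 *      bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
 *
 * and the INT_MIN priority makes this notifier run after every other
 * PCI bus notifier has already seen the add/remove event.
 */
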
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;

        drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        return dmar_parse_dev_scope((void *)(drhd + 1),
                                    ((void *)drhd) + drhd->header.length,
                                    &dmaru->devices_cnt, &dmaru->devices,
                                    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;
        int drhd_count = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been done
         * with a fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        drhd_count++;
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                default:
                        pr_warn("Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        if (drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
        return ret;
}

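/*
 * Layout sketch (illustrative) of the table walked above:
 *
 *      struct acpi_table_dmar          header, haw, flags
 *        struct acpi_dmar_header       type = DRHD, length
 *          ... DRHD payload + device scopes ...
 *        struct acpi_dmar_header       type = RMRR, length
 *          ...
 *        struct acpi_dmar_header       type = ATSR/RHSA/..., length
 *          ...
 *
 * Every sub-table starts with the common type/length header, which is
 * why the parser can skip unknown types by simply advancing
 * entry_header->length bytes.
 */
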
static int dmar_pci_device_match(struct pci_dev __rcu *devices[], int cnt,
                                 struct pci_dev *dev)
{
        int index;
        struct pci_dev *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev == tmp)
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

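/*
 * Note (illustrative): pci_physfn() maps an SR-IOV virtual function to
 * its parent physical function, so a VF is matched against the DRHD
 * scope of its PF; the INCLUDE_ALL unit, kept at the list tail by
 * dmar_register_drhd_unit(), acts as the catch-all for its segment.
 */
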
int __init dmar_dev_scope_init(void)
{
        static int dmar_dev_scope_initialized;
        struct dmar_drhd_unit *drhd;
        int ret = -ENODEV;

        if (dmar_dev_scope_initialized)
                return dmar_dev_scope_initialized;

        if (list_empty(&dmar_drhd_units))
                goto fail;

        for_each_drhd_unit(drhd) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        goto fail;
        }

        ret = dmar_parse_rmrr_atsr_dev();
        if (ret)
                goto fail;

        bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);

        dmar_dev_scope_initialized = 1;
        return 0;

fail:
        dmar_dev_scope_initialized = ret;
        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Failed to parse DMAR table\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                pr_warn("IOMMU: can't validate: %llx\n",
                                        drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}

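/*
 * Note (illustrative): the "Reflect status in gcmd" block above matters
 * when the IOMMU is already live at boot (e.g. entered via kexec with
 * translation or interrupt remapping still enabled). Writes to
 * DMAR_GCMD_REG are whole-register, built from the iommu->gcmd shadow,
 * so seeding the shadow from DMAR_GSTS_REG keeps a later
 *
 *      writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 *
 * from silently turning those features off.
 */
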
static void free_iommu(struct intel_iommu *iommu)
{
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
                irq_set_handler_data(iommu->irq, NULL);
                destroy_irq(iommu->irq);
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the
                 * queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

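/*
 * Note (illustrative): each submission really queues two descriptors --
 * the caller's descriptor at @index and a QI_IWD wait descriptor at
 * @wait_index whose completion makes the hardware write QI_DONE into
 * qi->desc_status[wait_index]. Polling that status word is how the
 * function knows the preceding descriptor has been executed:
 *
 *      ring: ... [invalidate][wait -> status] ...   head/tail step by 2
 */
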
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

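/*
 * Worked example (illustrative): with 4KiB pages and mask == 2, the
 * BUG_ON enforces 16KiB alignment of @addr, and ORing in (1 << 13) - 1
 * sets the low address bits that, together with the QI_DEV_IOTLB_SIZE
 * bit, encode "invalidate 2^2 = 4 pages at @addr" in the PCIe ATS
 * address/size format.
 */
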
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long)qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

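/*
 * Usage sketch (illustrative): callers bring the queue up before
 * issuing descriptors, e.g.
 *
 *      if (dmar_enable_qi(iommu))
 *              return -ENODEV;         (fall back to register-based flush)
 *      qi_global_iec(iommu);
 *
 * Note that qi_submit_sync() quietly returns 0 when iommu->qi is NULL,
 * so the qi_flush_*() helpers above are safe to call either way.
 */
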
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001374void dmar_msi_unmask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001375{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001376 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001377 unsigned long flag;
1378
1379 /* unmask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001380 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001381 writel(0, iommu->reg + DMAR_FECTL_REG);
1382 /* Read a reg to force flush the post write */
1383 readl(iommu->reg + DMAR_FECTL_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001384 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001385}
1386
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001387void dmar_msi_mask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001388{
1389 unsigned long flag;
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001390 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001391
1392 /* mask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001393 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001394 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1395 /* Read a reg to force flush the post write */
 1396	/* Read the register back to flush the posted write */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001397 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001398}
1399
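/*
 * The fault event registers form an MSI-style message: DMAR_FEDATA_REG
 * holds the data payload, and DMAR_FEADDR_REG/DMAR_FEUADDR_REG hold the
 * low and upper halves of the target address. dmar_msi_write() and
 * dmar_msi_read() below simply mirror a struct msi_msg into and out of
 * those three registers under the register lock.
 */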
1400void dmar_msi_write(int irq, struct msi_msg *msg)
1401{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001402 struct intel_iommu *iommu = irq_get_handler_data(irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001403 unsigned long flag;
1404
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001405 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001406 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1407 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1408 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001409 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001410}
1411
1412void dmar_msi_read(int irq, struct msi_msg *msg)
1413{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001414 struct intel_iommu *iommu = irq_get_handler_data(irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001415 unsigned long flag;
1416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001417 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001418 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1419 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1420 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001421 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001422}
1423
1424static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1425 u8 fault_reason, u16 source_id, unsigned long long addr)
1426{
1427 const char *reason;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001428 int fault_type;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001429
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001430 reason = dmar_get_fault_reason(fault_reason, &fault_type);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001431
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001432 if (fault_type == INTR_REMAP)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001433		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001434 "fault index %llx\n"
1435 "INTR-REMAP:[fault reason %02d] %s\n",
1436 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1437 PCI_FUNC(source_id & 0xFF), addr >> 48,
1438 fault_reason, reason);
1439 else
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001440 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001441			"fault addr %llx\n"
1442 "DMAR:[fault reason %02d] %s\n",
1443 (type ? "DMA Read" : "DMA Write"),
1444 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1445 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001446 return 0;
1447}
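/*
 * Example of the resulting log output (illustrative values only): given
 * the format strings above, a call such as
 *
 *	dmar_fault_do_one(iommu, 1, 0x06, 0x0010, 0xdead000);
 *
 * would print (plus the pr_fmt module prefix):
 *
 *	DMAR:[DMA Read] Request device [00:02.0] fault addr dead000
 *	DMAR:[fault reason 06] PTE Read access is not set
 *
 * since source_id 0x0010 decodes to bus 0x00, device 2, function 0.
 */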
1448
1449#define PRIMARY_FAULT_REG_LEN (16)
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001450irqreturn_t dmar_fault(int irq, void *dev_id)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001451{
1452 struct intel_iommu *iommu = dev_id;
1453 int reg, fault_index;
1454 u32 fault_status;
1455 unsigned long flag;
1456
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001457 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001458 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001459 if (fault_status)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001460 pr_err("DRHD: handling fault status reg %x\n", fault_status);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001461
 1462	/* TBD: the advanced fault log is ignored for now */
1463 if (!(fault_status & DMA_FSTS_PPF))
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001464 goto unlock_exit;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001465
1466 fault_index = dma_fsts_fault_record_index(fault_status);
1467 reg = cap_fault_reg_offset(iommu->cap);
1468 while (1) {
1469 u8 fault_reason;
1470 u16 source_id;
1471 u64 guest_addr;
1472 int type;
1473 u32 data;
1474
 1475		/* high 32 bits of this 128-bit fault record */
1476 data = readl(iommu->reg + reg +
1477 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1478 if (!(data & DMA_FRCD_F))
1479 break;
1480
1481 fault_reason = dma_frcd_fault_reason(data);
1482 type = dma_frcd_type(data);
1483
1484 data = readl(iommu->reg + reg +
1485 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1486 source_id = dma_frcd_source_id(data);
1487
1488 guest_addr = dmar_readq(iommu->reg + reg +
1489 fault_index * PRIMARY_FAULT_REG_LEN);
1490 guest_addr = dma_frcd_page_addr(guest_addr);
1491 /* clear the fault */
1492 writel(DMA_FRCD_F, iommu->reg + reg +
1493 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1494
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001495 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001496
1497 dmar_fault_do_one(iommu, type, fault_reason,
1498 source_id, guest_addr);
1499
1500 fault_index++;
Troy Heber8211a7b2009-08-19 15:26:11 -06001501 if (fault_index >= cap_num_fault_regs(iommu->cap))
Suresh Siddha0ac24912009-03-16 17:04:54 -07001502 fault_index = 0;
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001503 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001504 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001505
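	/*
	 * Clear the primary fault overflow (PFO) and primary pending
	 * fault (PPF) indications now that the individual fault records
	 * have been drained above.
	 */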
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001506 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1507
1508unlock_exit:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001509 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001510 return IRQ_HANDLED;
1511}
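/*
 * For reference, a sketch of the layout the loop above relies on (the
 * exact bit positions come from the dma_frcd_* helpers and the VT-d
 * spec, so treat them as assumptions). Each 16-byte primary fault
 * record is read as:
 *
 *	+0	u64	faulting page address (low bits masked off)
 *	+8	u32	source id in the low 16 bits
 *	+12	u32	F (fault) flag in bit 31, read/write type in
 *			bit 30, fault reason in the low byte
 *
 * Writing DMA_FRCD_F back to the dword at offset +12 clears the record.
 */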
1512
1513int dmar_set_interrupt(struct intel_iommu *iommu)
1514{
1515 int irq, ret;
1516
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001517 /*
1518 * Check if the fault interrupt is already initialized.
1519 */
1520 if (iommu->irq)
1521 return 0;
1522
Suresh Siddha0ac24912009-03-16 17:04:54 -07001523 irq = create_irq();
1524 if (!irq) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001525 pr_err("IOMMU: no free vectors\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001526 return -EINVAL;
1527 }
1528
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001529 irq_set_handler_data(irq, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001530 iommu->irq = irq;
1531
1532 ret = arch_setup_dmar_msi(irq);
1533 if (ret) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001534 irq_set_handler_data(irq, NULL);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001535 iommu->irq = 0;
1536 destroy_irq(irq);
Chris Wrightdd726432009-05-13 15:55:52 -07001537 return ret;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001538 }
1539
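	/*
	 * dmar_fault() takes the raw register lock and must run in hard
	 * interrupt context, so keep the handler from being force-threaded
	 * with IRQF_NO_THREAD.
	 */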
Thomas Gleixner477694e2011-07-19 16:25:42 +02001540 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001541 if (ret)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001542 pr_err("IOMMU: can't request irq\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001543 return ret;
1544}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001545
1546int __init enable_drhd_fault_handling(void)
1547{
1548 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08001549 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001550
1551 /*
1552 * Enable fault control interrupt.
1553 */
Jiang Liu7c919772014-01-06 14:18:18 +08001554 for_each_iommu(iommu, drhd) {
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001555 u32 fault_status;
Jiang Liu7c919772014-01-06 14:18:18 +08001556 int ret = dmar_set_interrupt(iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001557
1558 if (ret) {
Donald Dutilee9071b02012-06-08 17:13:11 -04001559			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001560 (unsigned long long)drhd->reg_base_addr, ret);
1561 return -1;
1562 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001563
1564 /*
1565 * Clear any previous faults.
1566 */
1567 dmar_fault(iommu->irq, iommu);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001568 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1569 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001570 }
1571
1572 return 0;
1573}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001574
1575/*
1576 * Re-enable Queued Invalidation interface.
1577 */
1578int dmar_reenable_qi(struct intel_iommu *iommu)
1579{
1580 if (!ecap_qis(iommu->ecap))
1581 return -ENOENT;
1582
1583 if (!iommu->qi)
1584 return -ENOENT;
1585
1586 /*
1587 * First disable queued invalidation.
1588 */
1589 dmar_disable_qi(iommu);
1590 /*
 1591	 * Then enable queued invalidation again. Since there are no pending
1592 * invalidation requests now, it's safe to re-enable queued
1593 * invalidation.
1594 */
1595 __dmar_enable_qi(iommu);
1596
1597 return 0;
1598}
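/*
 * Hypothetical usage sketch (illustrative, not a call site in this
 * file): a resume path that has restored IOMMU state could bring queued
 * invalidation back with
 *
 *	if (iommu->qi)
 *		dmar_reenable_qi(iommu);
 *
 * relying on the -ENOENT returns above to skip units that never had QI
 * set up.
 */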
Youquan Song074835f2009-09-09 12:05:39 -04001599
1600/*
1601 * Check interrupt remapping support in DMAR table description.
1602 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001603int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001604{
1605 struct acpi_table_dmar *dmar;
1606 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001607 if (!dmar)
1608 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001609 return dmar->flags & 0x1;
1610}
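/*
 * Bit 0 of the DMAR table flags is the INTR_REMAP flag in the ACPI DMAR
 * specification, so a nonzero result here means the platform reports
 * interrupt-remapping hardware.
 */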
Jiang Liu694835d2014-01-06 14:18:16 +08001611
Jiang Liua868e6b2014-01-06 14:18:20 +08001612static int __init dmar_free_unused_resources(void)
1613{
1614 struct dmar_drhd_unit *dmaru, *dmaru_n;
1615
1616 /* DMAR units are in use */
1617 if (irq_remapping_enabled || intel_iommu_enabled)
1618 return 0;
1619
Jiang Liu59ce0512014-02-19 14:07:35 +08001620 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1621
Jiang Liu3a5670e2014-02-19 14:07:33 +08001622 down_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001623 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1624 list_del(&dmaru->list);
1625 dmar_free_drhd(dmaru);
1626 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001627 up_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001628
1629 return 0;
1630}
1631
1632late_initcall(dmar_free_unused_resources);
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001633IOMMU_INIT_POST(detect_intel_iommu);