/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		/* print the warning before dropping our reference */
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

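/*
 * Layout sketch of one device-scope entry as consumed above (inferred
 * from the arithmetic in dmar_parse_one_dev_scope(), not quoted from
 * the DMAR spec): a fixed header immediately followed by a variable
 * number of PCI path elements,
 *
 *	struct acpi_dmar_device_scope	scope;	 scope->length spans it all
 *	struct acpi_dmar_pci_path	path[];	 (dev, fn) hops, walked
 *						 downward from scope->bus
 *
 * hence count = (length - sizeof(scope)) / sizeof(struct acpi_dmar_pci_path).
 */
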
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do the detection again here; the earlier dmar_tbl mapping may
	 * have been made through an early fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

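/*
 * Informal sketch of the table shape walked by parse_dmar_table() above
 * (a summary of this file's parsing, not the spec text):
 *
 *	struct acpi_table_dmar	table;	fixed header: haw (width), flags
 *	followed by variable-length remapping structures, each led by a
 *	struct acpi_dmar_header { type; length; }:
 *		DRHD, RMRR, ATSR, RHSA, ...
 *
 * Each structure is skipped by its own length field, which is why a
 * zero-length entry has to be rejected to avoid looping forever.
 */
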
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

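/*
 * Usage sketch (illustrative only; do_something() is a hypothetical
 * consumer, real lookups of this kind live in intel-iommu.c):
 *
 *	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);
 *
 *	if (drhd)
 *		do_something(drhd->iommu);
 *
 * Note the pci_physfn() call above: a virtual function is matched
 * through the device scope of its parent physical function.
 */
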
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

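/*
 * Informal sketch of the invalidation ring managed above (derived from
 * qi_submit_sync() and reclaim_free_desc(), not a normative layout):
 *
 *	qi->desc[QI_LENGTH]		hardware-visible descriptor ring
 *	qi->desc_status[QI_LENGTH]	QI_FREE/QI_IN_USE/QI_DONE/QI_ABORT
 *	qi->free_head			next slot handed to hardware
 *	qi->free_tail			oldest slot not yet reclaimed
 *
 * Every submission burns two slots: the caller's descriptor plus a wait
 * descriptor whose status write flips desc_status[wait_index] to
 * QI_DONE; that flip is what the polling loop above spins on.
 */
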
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

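/*
 * Usage sketch (illustrative, assuming the caller has already enabled
 * queued invalidation on @iommu): a global context-cache flush followed
 * by a global IOTLB flush, using the granularity flags from
 * include/linux/intel-iommu.h, would be
 *
 *	qi_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *	qi_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 *
 * which mirrors how intel-iommu.c drives these helpers through its
 * iommu->flush callbacks.
 */
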
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

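/*
 * Usage sketch (illustrative; the message below is hypothetical, the
 * real callers are in intel-iommu.c and the interrupt-remapping code,
 * which depends on QI):
 *
 *	if (dmar_enable_qi(iommu))
 *		pr_warn("QI unavailable, using register-based invalidation\n");
 */
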
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

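/*
 * Layout sketch of one primary fault record as decoded by dmar_fault()
 * above (a summary of the register reads, not a spec restatement);
 * records are PRIMARY_FAULT_REG_LEN (16) bytes apart, starting at
 * cap_fault_reg_offset(iommu->cap):
 *
 *	+0	u64	faulting page address	(dma_frcd_page_addr())
 *	+8	u32	source id		(dma_frcd_source_id())
 *	+12	u32	F bit, reason, type	(dma_frcd_*())
 */
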
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);