/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that scanning the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
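
/*
 * Illustrative sketch of the walk above, with made-up values (not taken
 * from any real DMAR table): a scope entry for an endpoint behind one
 * PCIe bridge might carry the path {device 0x1c, function 0} followed by
 * {device 0x00, function 0}.  The loop then resolves:
 *
 *	bus  = pci_find_bus(segment, scope->bus);	root bus, e.g. 00
 *	pdev = pci_get_slot(bus, PCI_DEVFN(0x1c, 0));	the bridge
 *	bus  = pdev->subordinate;			secondary bus, e.g. 01
 *	pdev = pci_get_slot(bus, PCI_DEVFN(0x00, 0));	the endpoint
 */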

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
					&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}
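
/*
 * For orientation, a rough sketch of the boot-time call order, as assumed
 * from the callers of these entry points (not a normative contract):
 *
 *	detect_intel_iommu()	via IOMMU_INIT_POST(); maps the DMAR table
 *	dmar_table_init()	parses DRHD/RMRR/ATSR/RHSA structures
 *	dmar_dev_scope_init()	binds device scopes to struct pci_dev
 */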

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
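
/*
 * Worked example with hypothetical capability values: if
 * ecap_max_iotlb_offset() reported 0x1100 and cap_max_fault_reg_offset()
 * reported 0x230, map_size would be max(0x1100, 0x230) = 0x1100, which
 * VTD_PAGE_ALIGN() rounds up to 0x2000; the initial one-page (0x1000)
 * mapping above would then be released and remapped as two pages.
 */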

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
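
/*
 * A minimal sketch of the ring bookkeeping above, with made-up indices:
 * if the status slots hold { QI_DONE, QI_DONE, QI_IN_USE, ... } and
 * qi->free_tail is 0, reclaim_free_desc() marks slots 0 and 1 QI_FREE,
 * leaves free_tail at 2 (the QI_IN_USE slot), and adds 2 to free_cnt.
 */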

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
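
/*
 * Worked example of the size encoding above, with made-up numbers: given
 * VTD_PAGE_SHIFT == 12 and mask == 2 (a four-page span), the BUG_ON requires
 * addr to be aligned to 1 << 14; the code then ORs in (1 << 13) - 1, setting
 * the low address bits that, together with QI_DEV_IOTLB_SIZE, tell the
 * hardware the invalidation covers more than a single page.
 */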

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
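
/*
 * Decoding sketch with a made-up reason code: fault_reason 0x25 is >= 0x20
 * and 0x25 - 0x20 == 5 indexes irq_remap_fault_reasons[], so it decodes as
 * "Blocked a compatibility format interrupt request" with *fault_type set
 * to INTR_REMAP; a reason of 0x05 stays in the DMA-remap table and decodes
 * as "PTE Write access is not set".
 */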

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
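
/*
 * For reference, the fault recording register layout assumed by the reads
 * above: each record is PRIMARY_FAULT_REG_LEN (16) bytes, read here as the
 * 64-bit faulting page address at offset 0, the source-id in the 32 bits at
 * offset 8, and the fault reason/type plus the DMA_FRCD_F "fault" bit in
 * the high 32 bits at offset 12.
 */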

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

IOMMU_INIT_POST(detect_intel_iommu);