/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed, as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

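/*
 * Resolve one ACPI device-scope entry to a struct pci_dev: start at the
 * scope's bus and follow the (device, function) path entries down the
 * PCI hierarchy, then check that the device type found (endpoint vs.
 * bridge) matches what the entry claims.
 */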
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

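/*
 * Walk all device-scope entries in [start, end): first count the
 * endpoint/bridge entries so the pci_dev array can be sized, then make
 * a second pass that resolves each entry via dmar_parse_one_dev_scope().
 */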
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET)
			pr_warn("Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parse exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do the detection again here, as the earlier dmar_tbl mapping may
	 * have been done with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

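/*
 * Return 1 if @dev or any of its parent bridges appears in the given
 * device array, 0 otherwise.
 */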
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

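/*
 * Find the DRHD unit that covers @dev: either the INCLUDE_ALL unit for
 * the device's PCI segment, or a unit whose device scope explicitly
 * lists the device (or one of its parent bridges).
 */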
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Failed to parse DMAR table\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

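/*
 * Sanity-check every DRHD entry before committing to IOMMU init: a base
 * register address of zero, or capability registers that read back as
 * all ones, indicate broken firmware.
 */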
static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might span more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

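/*
 * Allocate and initialize the struct intel_iommu for one DRHD unit: map
 * its registers, derive the supported address widths (agaw/msagaw) from
 * the capability registers, and mirror any hardware state that firmware
 * already enabled (TE/IRE/QIE) into iommu->gcmd.
 */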
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

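/*
 * Check the fault status register for queued-invalidation errors and
 * recover: on IQE the offending descriptor is overwritten with the wait
 * descriptor so the queue can make progress; on ITE all pending wait
 * descriptors are aborted and the caller retries; ICE is simply cleared.
 */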
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

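/*
 * Queue a context-cache invalidation for domain @did, source @sid with
 * function mask @fm, at the granularity given by @type.
 */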
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

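/*
 * Queue an IOTLB invalidation for domain @did covering 2^size_order
 * pages at @addr, with read/write draining as advertised in the
 * capability register.
 */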
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

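/*
 * Queue a device-IOTLB invalidation for source @sid with invalidation
 * queue depth @qdep, covering 2^mask pages at @addr.
 */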
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
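/*
 * Fault interrupt handler: walk the primary fault recording registers
 * starting at the index reported in the fault status register, log and
 * clear each recorded fault, then clear the overflow/pending bits.
 */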
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

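/*
 * Set up the fault-event interrupt for this IOMMU: allocate an irq,
 * program the MSI registers via arch_setup_dmar_msi(), and install
 * dmar_fault() as the handler.
 */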
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);