/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS by the BIOS via the DMA Remapping Reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so that a scan of the
	 * list will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

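	/*
	 * Walk the bus/device/function path of the device scope entry:
	 * each acpi_dmar_pci_path step names a slot on the current bus,
	 * and a bridge's secondary bus (pdev->subordinate) becomes the
	 * bus for the next step.
	 */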
	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->device, path->function);
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
}

int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	int index, ret;

	*devices = dmar_alloc_dev_scope(start, end, cnt);
	if (*cnt == 0)
		return 0;
	else if (!*devices)
		return -ENOMEM;

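	/*
	 * Second pass: dmar_alloc_dev_scope() above already counted the
	 * endpoint/bridge entries; now resolve each one to a pci_dev.
	 */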
	for (index = 0; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				dmar_free_dev_scope(devices, cnt);
				return ret;
			}
			index++;
		}
	}

	return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
	if (*devices && *cnt) {
		while (--*cnt >= 0)
			pci_dev_put((*devices)[*cnt]);
		kfree(*devices);
		*devices = NULL;
		*cnt = 0;
	}
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	return dmar_parse_dev_scope((void *)(drhd + 1),
				    ((void *)drhd) + drhd->header.length,
				    &dmaru->devices_cnt, &dmaru->devices,
				    drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
					  (struct acpi_table_header **)&dmar_tbl,
					  &dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do the detection again here, since the earlier dmar_tbl mapping
	 * may have been done with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parsing DMAR table failed\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
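			/*
			 * Registers of a DRHD that does not actually exist
			 * read back as all ones, so probing CAP/ECAP catches
			 * firmware that reports a bogus register base.
			 */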
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

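	/*
	 * Tell the IOMMU init-table machinery (IOMMU_INIT_POST at the end
	 * of this file) whether a usable remapping unit was detected.
	 */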
	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	unmap_iommu(iommu);
 error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (iommu->irq) {
		free_irq(iommu->irq, iommu);
		irq_set_handler_data(iommu->irq, NULL);
		destroy_irq(iommu->irq);
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

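	/*
	 * Each submission consumes two slots in the ring: the caller's
	 * descriptor at free_head and a wait descriptor right after it.
	 * The wait descriptor makes the hardware write QI_DONE into
	 * desc_status[wait_index] once everything before it has executed,
	 * which is what the completion poll below spins on.
	 */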
	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is to
		 * avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

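	/*
	 * For a multi-page invalidation the size is encoded in the low
	 * address bits: with 2^mask pages, the bits below
	 * (VTD_PAGE_SHIFT + mask - 1) are set to 1 and the S
	 * (QI_DEV_IOTLB_SIZE) flag is set; addr must be aligned to the
	 * range, which the BUG_ON below enforces.
	 */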
	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
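/*
 * Each primary fault record is 16 bytes: the faulted address lives in
 * the low 64 bits, the source-id in the dword at offset 8, and the
 * fault reason, type and F (valid) bit in the dword at offset 12,
 * matching the offsets dmar_fault() reads below.
 */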
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* upper 32 bits of the fault record (offset 12) */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);