/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>

#undef PREFIX
#define PREFIX "DMAR:"
/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

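/*
 * Parse a single device scope entry: walk the PCI path recorded in the
 * entry, bus by bus, and return the matching pci_dev (with a reference
 * held) in *dev. A scope naming a missing device is tolerated and is
 * reported by returning 0 with *dev set to NULL.
 */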
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING PREFIX
				"Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

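/*
 * Parse all device scope entries between start and end: a first pass
 * counts the supported (endpoint/bridge) entries, a second pass fills a
 * freshly allocated array of pci_dev pointers for them.
 */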
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

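/*
 * Second-stage pass over a DRHD: resolve its ACPI device scope into
 * pci_dev pointers (this needs the PCI subsystem, so it runs separately
 * from table parsing). INCLUDE_ALL units have no scope to resolve; on
 * failure the unit is unlinked and freed.
 */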
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

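/*
 * Resolve the device scope of an RMRR. An RMRR that matches no devices
 * is useless, so it is unlinked and freed along with parse failures.
 */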
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

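/*
 * Return 1 if @dev sits below a PCIe root port that is listed in an
 * ATSR unit for the device's segment, or if that unit is INCLUDE_ALL;
 * return 0 otherwise.
 */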
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Detect the table again; the earlier dmar_tbl mapping may have
	 * been done through the fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

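/*
 * Parse the DMAR table once and build the DRHD/RMRR/ATSR lists. Safe to
 * call more than once; only the first call does the work.
 */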
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

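/*
 * Early boot probe: map the DMAR table just long enough to decide
 * whether a VT-d IOMMU is present, set iommu_detected accordingly, and
 * drop the early mapping again.
 */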
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
	struct acpi_table_dmar *dmar;
	/*
	 * for now we will disable dma-remapping when interrupt
	 * remapping is enabled.
	 * When support for queued invalidation for IOTLB invalidation
	 * is added, we will not need this any more.
	 */
	dmar = (struct acpi_table_dmar *) dmar_tbl;
	if (ret && cpu_has_x2apic && dmar->flags & 0x1)
		printk(KERN_INFO
		       "Queued invalidation will be enabled to support "
		       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
	if (ret && !no_iommu && !iommu_detected && !swiotlb &&
	    !dmar_disabled)
		iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

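/*
 * Check the fault status register for queue errors affecting the
 * descriptor at @index. An IQE (invalid descriptor) is recovered by
 * overwriting the bad descriptor with the wait descriptor; an ITE
 * (invalidation timeout) aborts the pending wait descriptors and asks
 * the caller to retry via -EAGAIN.
 */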
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
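/*
 * A typical caller builds a two-quadword descriptor on the stack and
 * submits it, e.g. the global interrupt-entry-cache flush further down
 * in this file:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */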
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

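/*
 * Queue a device-IOTLB (ATS) invalidation. A non-zero @mask requests a
 * size-aligned range: the low address bits are filled with 1s, which is
 * how the descriptor encodes the invalidation span.
 */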
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is required to support
 * interrupt-remapping. Also used by DMA-remapping, where it replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}


/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
	    ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}


#define PRIMARY_FAULT_REG_LEN (16)
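/*
 * Fault interrupt handler: drain the primary fault recording registers,
 * logging and clearing one fault per iteration, then clear any
 * remaining status bits.
 */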
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

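/*
 * Allocate and wire up the MSI-style fault-reporting interrupt for an
 * IOMMU. Idempotent: a unit that already has an irq is left alone.
 */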
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}