/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
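
/*
 * Rough shape of the ACPI DMAR table as this file consumes it (a summary
 * of the parsing code below, not a normative layout; see the VT-d
 * specification for the authoritative definition):
 *
 *	struct acpi_table_dmar		- haw (host address width), flags
 *	struct acpi_dmar_header		- per-entry type/length header
 *	  ACPI_DMAR_TYPE_HARDWARE_UNIT   -> struct acpi_dmar_hardware_unit (DRHD)
 *	  ACPI_DMAR_TYPE_RESERVED_MEMORY -> struct acpi_dmar_reserved_memory (RMRR)
 *	... further entries, each advanced by header->length ...
 *
 * DRHD and RMRR entries are followed by device scope entries
 * (struct acpi_dmar_device_scope), each of which carries an array of
 * struct acpi_dmar_pci_path elements; see dmar_parse_dev_scope() and
 * dmar_parse_one_dev_scope() below.
 */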

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);
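
/*
 * Registered units are walked in list order: for_each_drhd_unit() in
 * dmar_dev_scope_init() below, and list_for_each_entry() in
 * dmar_find_matched_drhd_unit().  dmar_register_drhd_unit() keeps an
 * INCLUDE_ALL unit at the tail so it is only matched after every
 * explicitly scoped unit has been checked.
 */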

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

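/*
 * dmar_parse_one_dev_scope - resolve one ACPI device scope entry to a pci_dev
 *
 * A scope entry names its start bus and is followed by an array of
 * struct acpi_dmar_pci_path (device, function) elements.  Each element
 * selects a slot on the current bus, and the walk then descends into that
 * device's subordinate bus until the endpoint or bridge described by the
 * entry is reached.
 */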
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
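
/*
 * The pci_dev arrays built by dmar_parse_dev_scope() are what
 * dmar_pci_device_match() and dmar_find_matched_drhd_unit() below search
 * when mapping a struct pci_dev to the DRHD unit that covers it.
 */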

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init
dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (!dmaru->include_all)
		ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	else {
		/* Only allow one INCLUDE_ALL */
		if (include_all) {
			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
				"device scope is allowed\n");
			ret = -EINVAL;
		}
		include_all = 1;
	}

	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
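
/*
 * Note the two-stage flow: parse_dmar_table() records the DRHD/RMRR headers
 * early, while the device scopes are only resolved to pci_dev pointers later
 * via dmar_dev_scope_init() -> dmar_parse_dev(), since pci_find_bus() and
 * pci_get_slot() presumably need the PCI buses to have been enumerated first.
 */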

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			rmrr->base_address, rmrr->end_address);
		break;
	}
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT_4K - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
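
/*
 * The parent walk above (dev->bus->self) means a device sitting behind a
 * bridge that is listed in a DRHD scope is treated as covered by that unit,
 * even though the endpoint itself is not listed.
 */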

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd;
	int ret = -ENODEV;

	for_each_drhd_unit(drhd) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr;
		for_each_rmrr_units(rmrr) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

/**
 * early_dmar_detect - checks to see if the platform supports DMAR devices
 */
int __init early_dmar_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = early_dmar_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * Once support for queued invalidation of the IOTLB
		 * is added, this will no longer be needed.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
				"Queued invalidation will be enabled to support "
				"x2apic and Intr-remapping.\n");
#endif

#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = PAGE_ALIGN_4K(map_size);
	if (map_size > PAGE_SIZE_4K) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		iommu->cap, iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
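
/*
 * Rough picture of the invalidation queue bookkeeping used below: hardware
 * consumes a ring of QI_LENGTH descriptors, with a parallel desc_status[]
 * array tracked by software.
 *
 *	free_tail - oldest slot not yet reclaimed (QI_DONE -> QI_FREE)
 *	free_head - next slot handed to new work (marked QI_IN_USE)
 *	free_cnt  - number of free slots in between
 *
 * qi_submit_sync() consumes two slots per call: one for the caller's
 * descriptor and one for a wait descriptor whose status word the hardware
 * writes to signal completion.
 */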

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	spin_lock(&iommu->register_lock);
	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
		| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);

	return 0;
}
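
/*
 * Both qi_flush_context() above and qi_flush_iotlb() below special-case
 * non_present_entry_flush: if the IOMMU does not report caching mode
 * (cap_caching_mode() == 0), the flush is skipped and 1 is returned to tell
 * the caller that no invalidation was issued; with caching mode set, the
 * flush is issued with domain id 0.  This follows the VT-d caching-mode
 * semantics, under which not-present entries are only cached when CM is set.
 */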

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);

	return 0;
}

/*
 * Enable the Queued Invalidation interface. This is required to support
 * interrupt-remapping, and is also used by DMA-remapping in place of
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}