/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via DMA remapping reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/*
 * No locks are needed, as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL entries at the tail, so a scan of the list
	 * will find them at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

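/*
 * Walk a single ACPI device-scope entry: follow its PCI path hop by hop,
 * starting at scope->bus, and return the resulting pci_dev (with a
 * reference held) in *dev.  Devices that the BIOS lists but that do not
 * exist are skipped; a mismatch between the scope type and the device
 * found is reported as -EINVAL.
 */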
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING PREFIX
				"Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

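/*
 * Parse all device-scope entries between start and end: count the
 * endpoint/bridge entries first, allocate the *devices array, then
 * resolve each entry to a pci_dev via dmar_parse_one_dev_scope().
 */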
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

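/*
 * Resolve the device scopes of one DRHD into pci_dev pointers.  INCLUDE_ALL
 * units have no explicit scope to parse; on failure the unit is unlinked
 * and freed.
 */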
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

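/*
 * Parse one Reserved Memory Region Reporting (RMRR) structure and add it to
 * dmar_rmrr_units; its device scopes are resolved later by rmrr_parse_dev().
 */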
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

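/*
 * Resolve the device scopes of one RMRR; an RMRR whose scope fails to parse
 * or that ends up with no devices is unlinked and freed.
 */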
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

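/* Log a one-line summary of a DRHD or RMRR entry from the DMAR table. */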
| 259 | static void __init |
| 260 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) |
| 261 | { |
| 262 | struct acpi_dmar_hardware_unit *drhd; |
| 263 | struct acpi_dmar_reserved_memory *rmrr; |
| 264 | |
| 265 | switch (header->type) { |
| 266 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
| 267 | drhd = (struct acpi_dmar_hardware_unit *)header; |
| 268 | printk (KERN_INFO PREFIX |
| 269 | "DRHD (flags: 0x%08x)base: 0x%016Lx\n", |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 270 | drhd->flags, (unsigned long long)drhd->address); |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 271 | break; |
| 272 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
| 273 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
| 274 | |
| 275 | printk (KERN_INFO PREFIX |
| 276 | "RMRR base: 0x%016Lx end: 0x%016Lx\n", |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 277 | (unsigned long long)rmrr->base_address, |
| 278 | (unsigned long long)rmrr->end_address); |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 279 | break; |
| 280 | } |
| 281 | } |
| 282 | |
Yinghai Lu | f6dd5c3 | 2008-09-03 16:58:32 -0700 | [diff] [blame] | 283 | /** |
| 284 | * dmar_table_detect - checks to see if the platform supports DMAR devices |
| 285 | */ |
| 286 | static int __init dmar_table_detect(void) |
| 287 | { |
| 288 | acpi_status status = AE_OK; |
| 289 | |
| 290 | /* if we could find DMAR table, then there are DMAR devices */ |
| 291 | status = acpi_get_table(ACPI_SIG_DMAR, 0, |
| 292 | (struct acpi_table_header **)&dmar_tbl); |
| 293 | |
| 294 | if (ACPI_SUCCESS(status) && !dmar_tbl) { |
| 295 | printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); |
| 296 | status = AE_NOT_FOUND; |
| 297 | } |
| 298 | |
| 299 | return (ACPI_SUCCESS(status) ? 1 : 0); |
| 300 | } |
Suresh Siddha | aaa9d1d | 2008-07-10 11:16:38 -0700 | [diff] [blame] | 301 | |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 302 | /** |
| 303 | * parse_dmar_table - parses the DMA reporting table |
| 304 | */ |
| 305 | static int __init |
| 306 | parse_dmar_table(void) |
| 307 | { |
| 308 | struct acpi_table_dmar *dmar; |
| 309 | struct acpi_dmar_header *entry_header; |
| 310 | int ret = 0; |
| 311 | |
Yinghai Lu | f6dd5c3 | 2008-09-03 16:58:32 -0700 | [diff] [blame] | 312 | /* |
| 313 | * Do it again, earlier dmar_tbl mapping could be mapped with |
| 314 | * fixed map. |
| 315 | */ |
| 316 | dmar_table_detect(); |
| 317 | |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 318 | dmar = (struct acpi_table_dmar *)dmar_tbl; |
| 319 | if (!dmar) |
| 320 | return -ENODEV; |
| 321 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 322 | if (dmar->width < PAGE_SHIFT - 1) { |
Fenghua Yu | 093f87d | 2007-11-21 15:07:14 -0800 | [diff] [blame] | 323 | printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 324 | return -EINVAL; |
| 325 | } |
| 326 | |
| 327 | printk (KERN_INFO PREFIX "Host address width %d\n", |
| 328 | dmar->width + 1); |
| 329 | |
| 330 | entry_header = (struct acpi_dmar_header *)(dmar + 1); |
| 331 | while (((unsigned long)entry_header) < |
| 332 | (((unsigned long)dmar) + dmar_tbl->length)) { |
Tony Battersby | 084eb96 | 2009-02-11 13:24:19 -0800 | [diff] [blame] | 333 | /* Avoid looping forever on bad ACPI tables */ |
| 334 | if (entry_header->length == 0) { |
| 335 | printk(KERN_WARNING PREFIX |
| 336 | "Invalid 0-length structure\n"); |
| 337 | ret = -EINVAL; |
| 338 | break; |
| 339 | } |
| 340 | |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 341 | dmar_table_print_dmar_entry(entry_header); |
| 342 | |
| 343 | switch (entry_header->type) { |
| 344 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
| 345 | ret = dmar_parse_one_drhd(entry_header); |
| 346 | break; |
| 347 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
Suresh Siddha | aaa9d1d | 2008-07-10 11:16:38 -0700 | [diff] [blame] | 348 | #ifdef CONFIG_DMAR |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 349 | ret = dmar_parse_one_rmrr(entry_header); |
Suresh Siddha | aaa9d1d | 2008-07-10 11:16:38 -0700 | [diff] [blame] | 350 | #endif |
Keshavamurthy, Anil S | 10e5247 | 2007-10-21 16:41:41 -0700 | [diff] [blame] | 351 | break; |
| 352 | default: |
| 353 | printk(KERN_WARNING PREFIX |
| 354 | "Unknown DMAR structure type\n"); |
| 355 | ret = 0; /* for forward compatibility */ |
| 356 | break; |
| 357 | } |
| 358 | if (ret) |
| 359 | break; |
| 360 | |
| 361 | entry_header = ((void *)entry_header + entry_header->length); |
| 362 | } |
| 363 | return ret; |
| 364 | } |
| 365 | |
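/*
 * Return 1 if @dev, or any PCI bridge above it, is listed in the given
 * device array.
 */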
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

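/*
 * Find the DRHD unit that covers @dev: either a unit that explicitly lists
 * the device (or one of its parent bridges) in its scope, or the
 * INCLUDE_ALL unit of the device's PCI segment.
 */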
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

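/*
 * Late pass over the DRHD (and, with CONFIG_DMAR, RMRR) lists: resolve the
 * device scopes recorded at table-parse time into pci_dev pointers.  This
 * relies on PCI enumeration having already been done.
 */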
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

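/*
 * Parse the DMAR table exactly once (guarded by dmar_table_initialized)
 * and complain if no DRHD/RMRR entries were found.
 */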
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

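/*
 * Early detection of the DMAR table.  Only decides whether an IOMMU is
 * present (and whether x2apic/interrupt-remapping support is advertised);
 * the table is re-mapped and fully parsed later via dmar_table_init().
 */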
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now, we disable DMA remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	dmar_tbl = NULL;
}

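/*
 * Allocate and minimally initialise an intel_iommu for one DRHD: map its
 * register range, read the capability registers, and (with CONFIG_DMAR)
 * compute the adjusted guest address width (agaw).
 */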
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto error;
	}
#endif
	iommu->agaw = agaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	/* drop the register mapping if it was already set up */
	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
	return -1;
}

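/* Undo alloc_iommu(): release DMA-remapping state, unmap registers, free. */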
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

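/*
 * Check the fault status register for an Invalidation Queue Error on the
 * descriptor at @index.  On an IQE, the offending descriptor is overwritten
 * with the (known good) wait descriptor so the queue can make progress,
 * the fault is cleared, and -EINVAL is returned.
 */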
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If an IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			goto out;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}
out:
	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

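/*
 * Queue a context-cache invalidation.  For non-present entry flushes, the
 * request is skipped (returns 1) unless the IOMMU reports caching mode.
 */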
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

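/*
 * Queue an IOTLB invalidation for @did/@addr of the given size order and
 * granularity, requesting read/write drain when the hardware supports it.
 * Follows the same non-present-entry rule as qi_flush_context().
 */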
int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	return qi_submit_sync(&desc, iommu);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* all descriptor status slots start out as QI_FREE (zero) */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}