blob: 977070ce4fe9751281f00f73ccb19f2446f7b56c [file] [log] [blame]
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070021 *
Suresh Siddhae61d98d2008-07-10 11:16:35 -070022 * This file implements early detection/parsing of Remapping Devices
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070023 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
Suresh Siddhae61d98d2008-07-10 11:16:35 -070025 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070027 */
28
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020029#define pr_fmt(fmt) "DMAR: " fmt
Donald Dutilee9071b02012-06-08 17:13:11 -040030
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070031#include <linux/pci.h>
32#include <linux/dmar.h>
Kay, Allen M38717942008-09-09 18:37:29 +030033#include <linux/iova.h>
34#include <linux/intel-iommu.h>
Suresh Siddhafe962e92008-07-10 11:16:42 -070035#include <linux/timer.h>
Suresh Siddha0ac24912009-03-16 17:04:54 -070036#include <linux/irq.h>
37#include <linux/interrupt.h>
Shane Wang69575d32009-09-01 18:25:07 -070038#include <linux/tboot.h>
Len Browneb27cae2009-07-06 23:40:19 -040039#include <linux/dmi.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090040#include <linux/slab.h>
Alex Williamsona5459cf2014-06-12 16:12:31 -060041#include <linux/iommu.h>
Daniel Drake159ba992020-03-12 14:09:55 +080042#include <linux/limits.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070043#include <asm/irq_remapping.h>
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -040044#include <asm/iommu_table.h>
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070045
Joerg Roedel078e1ee2012-09-26 12:44:43 +020046#include "irq_remapping.h"
47
Jiang Liuc2a0b532014-11-09 22:47:56 +080048typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
49struct dmar_res_callback {
50 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
51 void *arg[ACPI_DMAR_TYPE_RESERVED];
52 bool ignore_unhandled;
53 bool print_entry;
54};
55
Jiang Liu3a5670e2014-02-19 14:07:33 +080056/*
57 * Assumptions:
58 * 1) The hotplug framework guarentees that DMAR unit will be hot-added
59 * before IO devices managed by that unit.
60 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
61 * after IO devices managed by that unit.
62 * 3) Hotplug events are rare.
63 *
64 * Locking rules for DMA and interrupt remapping related global data structures:
65 * 1) Use dmar_global_lock in process context
66 * 2) Use RCU in interrupt context
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070067 */
Jiang Liu3a5670e2014-02-19 14:07:33 +080068DECLARE_RWSEM(dmar_global_lock);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070069LIST_HEAD(dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070070
Suresh Siddha41750d32011-08-23 17:05:18 -070071struct acpi_table_header * __initdata dmar_tbl;
Yinghai Lu8e1568f2009-02-11 01:06:59 -080072static acpi_size dmar_tbl_size;
Jiang Liu2e455282014-02-19 14:07:36 +080073static int dmar_dev_scope_status = 1;
Jiang Liu78d8e702014-11-09 22:47:57 +080074static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070075
Jiang Liu694835d2014-01-06 14:18:16 +080076static int alloc_iommu(struct dmar_drhd_unit *drhd);
Jiang Liua868e6b2014-01-06 14:18:20 +080077static void free_iommu(struct intel_iommu *iommu);
Jiang Liu694835d2014-01-06 14:18:16 +080078
Jiang Liu6b197242014-11-09 22:47:58 +080079static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070080{
81 /*
82 * add INCLUDE_ALL at the tail, so scan the list will find it at
83 * the very end.
84 */
85 if (drhd->include_all)
Jiang Liu0e242612014-02-19 14:07:34 +080086 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070087 else
Jiang Liu0e242612014-02-19 14:07:34 +080088 list_add_rcu(&drhd->list, &dmar_drhd_units);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070089}
90
Jiang Liubb3a6b72014-02-19 14:07:24 +080091void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070092{
93 struct acpi_dmar_device_scope *scope;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -070094
95 *cnt = 0;
96 while (start < end) {
97 scope = start;
Bob Moore83118b02014-07-30 12:21:00 +080098 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
David Woodhouse07cb52f2014-03-07 14:39:27 +000099 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700100 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
101 (*cnt)++;
Linn Crosettoae3e7f32013-04-23 12:26:45 -0600102 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
103 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400104 pr_warn("Unsupported device scope\n");
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100105 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700106 start += scope->length;
107 }
108 if (*cnt == 0)
Jiang Liubb3a6b72014-02-19 14:07:24 +0800109 return NULL;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700110
David Woodhouse832bd852014-03-07 15:08:36 +0000111 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
Jiang Liubb3a6b72014-02-19 14:07:24 +0800112}
113
David Woodhouse832bd852014-03-07 15:08:36 +0000114void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
Jiang Liuada4d4b2014-01-06 14:18:09 +0800115{
Jiang Liub683b232014-02-19 14:07:32 +0800116 int i;
David Woodhouse832bd852014-03-07 15:08:36 +0000117 struct device *tmp_dev;
Jiang Liub683b232014-02-19 14:07:32 +0800118
Jiang Liuada4d4b2014-01-06 14:18:09 +0800119 if (*devices && *cnt) {
Jiang Liub683b232014-02-19 14:07:32 +0800120 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
David Woodhouse832bd852014-03-07 15:08:36 +0000121 put_device(tmp_dev);
Jiang Liuada4d4b2014-01-06 14:18:09 +0800122 kfree(*devices);
Jiang Liuada4d4b2014-01-06 14:18:09 +0800123 }
Jiang Liu0e242612014-02-19 14:07:34 +0800124
125 *devices = NULL;
126 *cnt = 0;
Jiang Liuada4d4b2014-01-06 14:18:09 +0800127}
128
Jiang Liu59ce0512014-02-19 14:07:35 +0800129/* Optimize out kzalloc()/kfree() for normal cases */
130static char dmar_pci_notify_info_buf[64];
131
132static struct dmar_pci_notify_info *
133dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
134{
135 int level = 0;
136 size_t size;
137 struct pci_dev *tmp;
138 struct dmar_pci_notify_info *info;
139
140 BUG_ON(dev->is_virtfn);
141
Daniel Drake159ba992020-03-12 14:09:55 +0800142 /*
143 * Ignore devices that have a domain number higher than what can
144 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
145 */
146 if (pci_domain_nr(dev->bus) > U16_MAX)
147 return NULL;
148
Jiang Liu59ce0512014-02-19 14:07:35 +0800149 /* Only generate path[] for device addition event */
150 if (event == BUS_NOTIFY_ADD_DEVICE)
151 for (tmp = dev; tmp; tmp = tmp->bus->self)
152 level++;
153
Julia Cartwright0afa6d82019-02-20 16:46:31 +0000154 size = sizeof(*info) + level * sizeof(info->path[0]);
Jiang Liu59ce0512014-02-19 14:07:35 +0800155 if (size <= sizeof(dmar_pci_notify_info_buf)) {
156 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
157 } else {
158 info = kzalloc(size, GFP_KERNEL);
159 if (!info) {
160 pr_warn("Out of memory when allocating notify_info "
161 "for %s.\n", pci_name(dev));
Jiang Liu2e455282014-02-19 14:07:36 +0800162 if (dmar_dev_scope_status == 0)
163 dmar_dev_scope_status = -ENOMEM;
Jiang Liu59ce0512014-02-19 14:07:35 +0800164 return NULL;
165 }
166 }
167
168 info->event = event;
169 info->dev = dev;
170 info->seg = pci_domain_nr(dev->bus);
171 info->level = level;
172 if (event == BUS_NOTIFY_ADD_DEVICE) {
Jiang Liu5ae05662014-04-15 10:35:35 +0800173 for (tmp = dev; tmp; tmp = tmp->bus->self) {
174 level--;
Joerg Roedel57384592014-10-02 11:50:25 +0200175 info->path[level].bus = tmp->bus->number;
Jiang Liu59ce0512014-02-19 14:07:35 +0800176 info->path[level].device = PCI_SLOT(tmp->devfn);
177 info->path[level].function = PCI_FUNC(tmp->devfn);
178 if (pci_is_root_bus(tmp->bus))
179 info->bus = tmp->bus->number;
180 }
181 }
182
183 return info;
184}
185
186static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
187{
188 if ((void *)info != dmar_pci_notify_info_buf)
189 kfree(info);
190}
191
192static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
193 struct acpi_dmar_pci_path *path, int count)
194{
195 int i;
196
197 if (info->bus != bus)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200198 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800199 if (info->level != count)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200200 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800201
202 for (i = 0; i < count; i++) {
203 if (path[i].device != info->path[i].device ||
204 path[i].function != info->path[i].function)
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200205 goto fallback;
Jiang Liu59ce0512014-02-19 14:07:35 +0800206 }
207
208 return true;
Joerg Roedel80f7b3d2014-09-22 16:30:22 +0200209
210fallback:
211
212 if (count != 1)
213 return false;
214
215 i = info->level - 1;
216 if (bus == info->path[i].bus &&
217 path[0].device == info->path[i].device &&
218 path[0].function == info->path[i].function) {
219 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
220 bus, path[0].device, path[0].function);
221 return true;
222 }
223
224 return false;
Jiang Liu59ce0512014-02-19 14:07:35 +0800225}
226
227/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
228int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
229 void *start, void*end, u16 segment,
David Woodhouse832bd852014-03-07 15:08:36 +0000230 struct dmar_dev_scope *devices,
231 int devices_cnt)
Jiang Liu59ce0512014-02-19 14:07:35 +0800232{
233 int i, level;
David Woodhouse832bd852014-03-07 15:08:36 +0000234 struct device *tmp, *dev = &info->dev->dev;
Jiang Liu59ce0512014-02-19 14:07:35 +0800235 struct acpi_dmar_device_scope *scope;
236 struct acpi_dmar_pci_path *path;
237
238 if (segment != info->seg)
239 return 0;
240
241 for (; start < end; start += scope->length) {
242 scope = start;
243 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
244 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
245 continue;
246
247 path = (struct acpi_dmar_pci_path *)(scope + 1);
248 level = (scope->length - sizeof(*scope)) / sizeof(*path);
249 if (!dmar_match_pci_path(info, scope->bus, path, level))
250 continue;
251
Roland Dreierffb2d1e2016-06-02 17:46:10 -0700252 /*
253 * We expect devices with endpoint scope to have normal PCI
254 * headers, and devices with bridge scope to have bridge PCI
255 * headers. However PCI NTB devices may be listed in the
256 * DMAR table with bridge scope, even though they have a
257 * normal PCI header. NTB devices are identified by class
258 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch
259 * for this special case.
260 */
261 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
262 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
263 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
264 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
265 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
Jiang Liu59ce0512014-02-19 14:07:35 +0800266 pr_warn("Device scope type does not match for %s\n",
David Woodhouse832bd852014-03-07 15:08:36 +0000267 pci_name(info->dev));
Jiang Liu59ce0512014-02-19 14:07:35 +0800268 return -EINVAL;
269 }
270
271 for_each_dev_scope(devices, devices_cnt, i, tmp)
272 if (tmp == NULL) {
David Woodhouse832bd852014-03-07 15:08:36 +0000273 devices[i].bus = info->dev->bus->number;
274 devices[i].devfn = info->dev->devfn;
275 rcu_assign_pointer(devices[i].dev,
276 get_device(dev));
Jiang Liu59ce0512014-02-19 14:07:35 +0800277 return 1;
278 }
279 BUG_ON(i >= devices_cnt);
280 }
281
282 return 0;
283}
284
285int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
David Woodhouse832bd852014-03-07 15:08:36 +0000286 struct dmar_dev_scope *devices, int count)
Jiang Liu59ce0512014-02-19 14:07:35 +0800287{
288 int index;
David Woodhouse832bd852014-03-07 15:08:36 +0000289 struct device *tmp;
Jiang Liu59ce0512014-02-19 14:07:35 +0800290
291 if (info->seg != segment)
292 return 0;
293
294 for_each_active_dev_scope(devices, count, index, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +0000295 if (tmp == &info->dev->dev) {
Andreea-Cristina Bernateecbad72014-08-18 15:20:56 +0300296 RCU_INIT_POINTER(devices[index].dev, NULL);
Jiang Liu59ce0512014-02-19 14:07:35 +0800297 synchronize_rcu();
David Woodhouse832bd852014-03-07 15:08:36 +0000298 put_device(tmp);
Jiang Liu59ce0512014-02-19 14:07:35 +0800299 return 1;
300 }
301
302 return 0;
303}
304
305static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
306{
307 int ret = 0;
308 struct dmar_drhd_unit *dmaru;
309 struct acpi_dmar_hardware_unit *drhd;
310
311 for_each_drhd_unit(dmaru) {
312 if (dmaru->include_all)
313 continue;
314
315 drhd = container_of(dmaru->hdr,
316 struct acpi_dmar_hardware_unit, header);
317 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
318 ((void *)drhd) + drhd->header.length,
319 dmaru->segment,
320 dmaru->devices, dmaru->devices_cnt);
321 if (ret != 0)
322 break;
323 }
324 if (ret >= 0)
325 ret = dmar_iommu_notify_scope_dev(info);
Jiang Liu2e455282014-02-19 14:07:36 +0800326 if (ret < 0 && dmar_dev_scope_status == 0)
327 dmar_dev_scope_status = ret;
Jiang Liu59ce0512014-02-19 14:07:35 +0800328
329 return ret;
330}
331
332static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
333{
334 struct dmar_drhd_unit *dmaru;
335
336 for_each_drhd_unit(dmaru)
337 if (dmar_remove_dev_scope(info, dmaru->segment,
338 dmaru->devices, dmaru->devices_cnt))
339 break;
340 dmar_iommu_notify_scope_dev(info);
341}
342
343static int dmar_pci_bus_notifier(struct notifier_block *nb,
344 unsigned long action, void *data)
345{
346 struct pci_dev *pdev = to_pci_dev(data);
347 struct dmar_pci_notify_info *info;
348
Ashok Raj1c387182016-10-21 15:32:05 -0700349 /* Only care about add/remove events for physical functions.
350 * For VFs we actually do the lookup based on the corresponding
351 * PF in device_to_iommu() anyway. */
Jiang Liu59ce0512014-02-19 14:07:35 +0800352 if (pdev->is_virtfn)
353 return NOTIFY_DONE;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +0100354 if (action != BUS_NOTIFY_ADD_DEVICE &&
355 action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu59ce0512014-02-19 14:07:35 +0800356 return NOTIFY_DONE;
357
358 info = dmar_alloc_pci_notify_info(pdev, action);
359 if (!info)
360 return NOTIFY_DONE;
361
362 down_write(&dmar_global_lock);
363 if (action == BUS_NOTIFY_ADD_DEVICE)
364 dmar_pci_bus_add_dev(info);
Joerg Roedele6a8c9b2016-02-29 23:49:47 +0100365 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu59ce0512014-02-19 14:07:35 +0800366 dmar_pci_bus_del_dev(info);
367 up_write(&dmar_global_lock);
368
369 dmar_free_pci_notify_info(info);
370
371 return NOTIFY_OK;
372}
373
374static struct notifier_block dmar_pci_bus_nb = {
375 .notifier_call = dmar_pci_bus_notifier,
376 .priority = INT_MIN,
377};
378
Jiang Liu6b197242014-11-09 22:47:58 +0800379static struct dmar_drhd_unit *
380dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
381{
382 struct dmar_drhd_unit *dmaru;
383
384 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
385 if (dmaru->segment == drhd->segment &&
386 dmaru->reg_base_addr == drhd->address)
387 return dmaru;
388
389 return NULL;
390}
391
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700392/**
393 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
394 * structure which uniquely represent one DMA remapping hardware unit
395 * present in the platform
396 */
Jiang Liu6b197242014-11-09 22:47:58 +0800397static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700398{
399 struct acpi_dmar_hardware_unit *drhd;
400 struct dmar_drhd_unit *dmaru;
401 int ret = 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700402
David Woodhousee523b382009-04-10 22:27:48 -0700403 drhd = (struct acpi_dmar_hardware_unit *)header;
Jiang Liu6b197242014-11-09 22:47:58 +0800404 dmaru = dmar_find_dmaru(drhd);
405 if (dmaru)
406 goto out;
407
408 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700409 if (!dmaru)
410 return -ENOMEM;
411
Jiang Liu6b197242014-11-09 22:47:58 +0800412 /*
413 * If header is allocated from slab by ACPI _DSM method, we need to
414 * copy the content because the memory buffer will be freed on return.
415 */
416 dmaru->hdr = (void *)(dmaru + 1);
417 memcpy(dmaru->hdr, header, header->length);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700418 dmaru->reg_base_addr = drhd->address;
David Woodhouse276dbf992009-04-04 01:45:37 +0100419 dmaru->segment = drhd->segment;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700420 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
David Woodhouse07cb52f2014-03-07 14:39:27 +0000421 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
422 ((void *)drhd) + drhd->header.length,
423 &dmaru->devices_cnt);
424 if (dmaru->devices_cnt && dmaru->devices == NULL) {
425 kfree(dmaru);
426 return -ENOMEM;
Jiang Liu2e455282014-02-19 14:07:36 +0800427 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700428
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700429 ret = alloc_iommu(dmaru);
430 if (ret) {
David Woodhouse07cb52f2014-03-07 14:39:27 +0000431 dmar_free_dev_scope(&dmaru->devices,
432 &dmaru->devices_cnt);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700433 kfree(dmaru);
434 return ret;
435 }
436 dmar_register_drhd_unit(dmaru);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800437
Jiang Liu6b197242014-11-09 22:47:58 +0800438out:
Jiang Liuc2a0b532014-11-09 22:47:56 +0800439 if (arg)
440 (*(int *)arg)++;
441
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700442 return 0;
443}
444
Jiang Liua868e6b2014-01-06 14:18:20 +0800445static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
446{
447 if (dmaru->devices && dmaru->devices_cnt)
448 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
449 if (dmaru->iommu)
450 free_iommu(dmaru->iommu);
451 kfree(dmaru);
452}
453
Jiang Liuc2a0b532014-11-09 22:47:56 +0800454static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
455 void *arg)
David Woodhousee625b4a2014-03-07 14:34:38 +0000456{
457 struct acpi_dmar_andd *andd = (void *)header;
458
459 /* Check for NUL termination within the designated length */
Bob Moore83118b02014-07-30 12:21:00 +0800460 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
Hans de Goede5145afc2020-03-09 15:01:37 +0100461 pr_warn(FW_BUG
David Woodhousee625b4a2014-03-07 14:34:38 +0000462 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
463 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
464 dmi_get_system_info(DMI_BIOS_VENDOR),
465 dmi_get_system_info(DMI_BIOS_VERSION),
466 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100467 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
David Woodhousee625b4a2014-03-07 14:34:38 +0000468 return -EINVAL;
469 }
470 pr_info("ANDD device: %x name: %s\n", andd->device_number,
Bob Moore83118b02014-07-30 12:21:00 +0800471 andd->device_name);
David Woodhousee625b4a2014-03-07 14:34:38 +0000472
473 return 0;
474}
475
David Woodhouseaa697072009-10-07 12:18:00 +0100476#ifdef CONFIG_ACPI_NUMA
Jiang Liu6b197242014-11-09 22:47:58 +0800477static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
Suresh Siddhaee34b322009-10-02 11:01:21 -0700478{
479 struct acpi_dmar_rhsa *rhsa;
480 struct dmar_drhd_unit *drhd;
481
482 rhsa = (struct acpi_dmar_rhsa *)header;
David Woodhouseaa697072009-10-07 12:18:00 +0100483 for_each_drhd_unit(drhd) {
Suresh Siddhaee34b322009-10-02 11:01:21 -0700484 if (drhd->reg_base_addr == rhsa->base_address) {
485 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
486
487 if (!node_online(node))
488 node = -1;
489 drhd->iommu->node = node;
David Woodhouseaa697072009-10-07 12:18:00 +0100490 return 0;
491 }
Suresh Siddhaee34b322009-10-02 11:01:21 -0700492 }
Hans de Goede5145afc2020-03-09 15:01:37 +0100493 pr_warn(FW_BUG
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100494 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
495 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
Zhenzhong Duan1d66a552020-03-12 14:09:54 +0800496 rhsa->base_address,
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100497 dmi_get_system_info(DMI_BIOS_VENDOR),
498 dmi_get_system_info(DMI_BIOS_VERSION),
499 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100500 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
Suresh Siddhaee34b322009-10-02 11:01:21 -0700501
David Woodhouseaa697072009-10-07 12:18:00 +0100502 return 0;
Suresh Siddhaee34b322009-10-02 11:01:21 -0700503}
Jiang Liuc2a0b532014-11-09 22:47:56 +0800504#else
505#define dmar_parse_one_rhsa dmar_res_noop
David Woodhouseaa697072009-10-07 12:18:00 +0100506#endif
Suresh Siddhaee34b322009-10-02 11:01:21 -0700507
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700508static void __init
509dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
510{
511 struct acpi_dmar_hardware_unit *drhd;
512 struct acpi_dmar_reserved_memory *rmrr;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800513 struct acpi_dmar_atsr *atsr;
Roland Dreier17b60972009-09-24 12:14:00 -0700514 struct acpi_dmar_rhsa *rhsa;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700515
516 switch (header->type) {
517 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800518 drhd = container_of(header, struct acpi_dmar_hardware_unit,
519 header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400520 pr_info("DRHD base: %#016Lx flags: %#x\n",
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800521 (unsigned long long)drhd->address, drhd->flags);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700522 break;
523 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800524 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
525 header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400526 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700527 (unsigned long long)rmrr->base_address,
528 (unsigned long long)rmrr->end_address);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700529 break;
Bob Moore83118b02014-07-30 12:21:00 +0800530 case ACPI_DMAR_TYPE_ROOT_ATS:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800531 atsr = container_of(header, struct acpi_dmar_atsr, header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400532 pr_info("ATSR flags: %#x\n", atsr->flags);
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800533 break;
Bob Moore83118b02014-07-30 12:21:00 +0800534 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
Roland Dreier17b60972009-09-24 12:14:00 -0700535 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
Donald Dutilee9071b02012-06-08 17:13:11 -0400536 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
Roland Dreier17b60972009-09-24 12:14:00 -0700537 (unsigned long long)rhsa->base_address,
538 rhsa->proximity_domain);
539 break;
Bob Moore83118b02014-07-30 12:21:00 +0800540 case ACPI_DMAR_TYPE_NAMESPACE:
David Woodhousee625b4a2014-03-07 14:34:38 +0000541 /* We don't print this here because we need to sanity-check
542 it first. So print it in dmar_parse_one_andd() instead. */
543 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700544 }
545}
546
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700547/**
548 * dmar_table_detect - checks to see if the platform supports DMAR devices
549 */
550static int __init dmar_table_detect(void)
551{
552 acpi_status status = AE_OK;
553
554 /* if we could find DMAR table, then there are DMAR devices */
Yinghai Lu8e1568f2009-02-11 01:06:59 -0800555 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
556 (struct acpi_table_header **)&dmar_tbl,
557 &dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700558
559 if (ACPI_SUCCESS(status) && !dmar_tbl) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400560 pr_warn("Unable to map DMAR\n");
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700561 status = AE_NOT_FOUND;
562 }
563
564 return (ACPI_SUCCESS(status) ? 1 : 0);
565}
Suresh Siddhaaaa9d1d2008-07-10 11:16:38 -0700566
Jiang Liuc2a0b532014-11-09 22:47:56 +0800567static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
568 size_t len, struct dmar_res_callback *cb)
569{
570 int ret = 0;
571 struct acpi_dmar_header *iter, *next;
572 struct acpi_dmar_header *end = ((void *)start) + len;
573
574 for (iter = start; iter < end && ret == 0; iter = next) {
575 next = (void *)iter + iter->length;
576 if (iter->length == 0) {
577 /* Avoid looping forever on bad ACPI tables */
578 pr_debug(FW_BUG "Invalid 0-length structure\n");
579 break;
580 } else if (next > end) {
581 /* Avoid passing table end */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200582 pr_warn(FW_BUG "Record passes table end\n");
Jiang Liuc2a0b532014-11-09 22:47:56 +0800583 ret = -EINVAL;
584 break;
585 }
586
587 if (cb->print_entry)
588 dmar_table_print_dmar_entry(iter);
589
590 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
591 /* continue for forward compatibility */
592 pr_debug("Unknown DMAR structure type %d\n",
593 iter->type);
594 } else if (cb->cb[iter->type]) {
595 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
596 } else if (!cb->ignore_unhandled) {
597 pr_warn("No handler for DMAR structure type %d\n",
598 iter->type);
599 ret = -EINVAL;
600 }
601 }
602
603 return ret;
604}
605
606static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
607 struct dmar_res_callback *cb)
608{
609 return dmar_walk_remapping_entries((void *)(dmar + 1),
610 dmar->header.length - sizeof(*dmar), cb);
611}
612
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700613/**
614 * parse_dmar_table - parses the DMA reporting table
615 */
616static int __init
617parse_dmar_table(void)
618{
619 struct acpi_table_dmar *dmar;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700620 int ret = 0;
Li, Zhen-Hua7cef3342013-05-20 15:57:32 +0800621 int drhd_count = 0;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800622 struct dmar_res_callback cb = {
623 .print_entry = true,
624 .ignore_unhandled = true,
625 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
626 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
627 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
628 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
629 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
630 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
631 };
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700632
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700633 /*
634 * Do it again, earlier dmar_tbl mapping could be mapped with
635 * fixed map.
636 */
637 dmar_table_detect();
638
Joseph Cihulaa59b50e2009-06-30 19:31:10 -0700639 /*
640 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
641 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
642 */
643 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
644
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700645 dmar = (struct acpi_table_dmar *)dmar_tbl;
646 if (!dmar)
647 return -ENODEV;
648
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700649 if (dmar->width < PAGE_SHIFT - 1) {
Donald Dutilee9071b02012-06-08 17:13:11 -0400650 pr_warn("Invalid DMAR haw\n");
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700651 return -EINVAL;
652 }
653
Donald Dutilee9071b02012-06-08 17:13:11 -0400654 pr_info("Host address width %d\n", dmar->width + 1);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800655 ret = dmar_walk_dmar_table(dmar, &cb);
656 if (ret == 0 && drhd_count == 0)
Li, Zhen-Hua7cef3342013-05-20 15:57:32 +0800657 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
Jiang Liuc2a0b532014-11-09 22:47:56 +0800658
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700659 return ret;
660}
661
David Woodhouse832bd852014-03-07 15:08:36 +0000662static int dmar_pci_device_match(struct dmar_dev_scope devices[],
663 int cnt, struct pci_dev *dev)
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700664{
665 int index;
David Woodhouse832bd852014-03-07 15:08:36 +0000666 struct device *tmp;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700667
668 while (dev) {
Jiang Liub683b232014-02-19 14:07:32 +0800669 for_each_active_dev_scope(devices, cnt, index, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +0000670 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700671 return 1;
672
673 /* Check our parent */
674 dev = dev->bus->self;
675 }
676
677 return 0;
678}
679
680struct dmar_drhd_unit *
681dmar_find_matched_drhd_unit(struct pci_dev *dev)
682{
Jiang Liu0e242612014-02-19 14:07:34 +0800683 struct dmar_drhd_unit *dmaru;
Yu Zhao2e824f72008-12-22 16:54:58 +0800684 struct acpi_dmar_hardware_unit *drhd;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700685
Yinghaidda56542010-04-09 01:07:55 +0100686 dev = pci_physfn(dev);
687
Jiang Liu0e242612014-02-19 14:07:34 +0800688 rcu_read_lock();
Yijing Wang8b161f02013-10-31 17:25:16 +0800689 for_each_drhd_unit(dmaru) {
Yu Zhao2e824f72008-12-22 16:54:58 +0800690 drhd = container_of(dmaru->hdr,
691 struct acpi_dmar_hardware_unit,
692 header);
693
694 if (dmaru->include_all &&
695 drhd->segment == pci_domain_nr(dev->bus))
Jiang Liu0e242612014-02-19 14:07:34 +0800696 goto out;
Yu Zhao2e824f72008-12-22 16:54:58 +0800697
698 if (dmar_pci_device_match(dmaru->devices,
699 dmaru->devices_cnt, dev))
Jiang Liu0e242612014-02-19 14:07:34 +0800700 goto out;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700701 }
Jiang Liu0e242612014-02-19 14:07:34 +0800702 dmaru = NULL;
703out:
704 rcu_read_unlock();
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700705
Jiang Liu0e242612014-02-19 14:07:34 +0800706 return dmaru;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700707}
708
David Woodhouseed403562014-03-07 23:15:42 +0000709static void __init dmar_acpi_insert_dev_scope(u8 device_number,
710 struct acpi_device *adev)
711{
712 struct dmar_drhd_unit *dmaru;
713 struct acpi_dmar_hardware_unit *drhd;
714 struct acpi_dmar_device_scope *scope;
715 struct device *tmp;
716 int i;
717 struct acpi_dmar_pci_path *path;
718
719 for_each_drhd_unit(dmaru) {
720 drhd = container_of(dmaru->hdr,
721 struct acpi_dmar_hardware_unit,
722 header);
723
724 for (scope = (void *)(drhd + 1);
725 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
726 scope = ((void *)scope) + scope->length) {
Bob Moore83118b02014-07-30 12:21:00 +0800727 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
David Woodhouseed403562014-03-07 23:15:42 +0000728 continue;
729 if (scope->enumeration_id != device_number)
730 continue;
731
732 path = (void *)(scope + 1);
733 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
734 dev_name(&adev->dev), dmaru->reg_base_addr,
735 scope->bus, path->device, path->function);
736 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
737 if (tmp == NULL) {
738 dmaru->devices[i].bus = scope->bus;
739 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
740 path->function);
741 rcu_assign_pointer(dmaru->devices[i].dev,
742 get_device(&adev->dev));
743 return;
744 }
745 BUG_ON(i >= dmaru->devices_cnt);
746 }
747 }
748 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
749 device_number, dev_name(&adev->dev));
750}
751
752static int __init dmar_acpi_dev_scope_init(void)
753{
Joerg Roedel11f1a772014-03-25 20:16:40 +0100754 struct acpi_dmar_andd *andd;
755
756 if (dmar_tbl == NULL)
757 return -ENODEV;
758
David Woodhouse7713ec02014-04-01 14:58:36 +0100759 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
760 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
761 andd = ((void *)andd) + andd->header.length) {
Bob Moore83118b02014-07-30 12:21:00 +0800762 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
David Woodhouseed403562014-03-07 23:15:42 +0000763 acpi_handle h;
764 struct acpi_device *adev;
765
766 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
Bob Moore83118b02014-07-30 12:21:00 +0800767 andd->device_name,
David Woodhouseed403562014-03-07 23:15:42 +0000768 &h))) {
769 pr_err("Failed to find handle for ACPI object %s\n",
Bob Moore83118b02014-07-30 12:21:00 +0800770 andd->device_name);
David Woodhouseed403562014-03-07 23:15:42 +0000771 continue;
772 }
Joerg Roedelc0df9752014-08-21 23:06:48 +0200773 if (acpi_bus_get_device(h, &adev)) {
David Woodhouseed403562014-03-07 23:15:42 +0000774 pr_err("Failed to get device for ACPI object %s\n",
Bob Moore83118b02014-07-30 12:21:00 +0800775 andd->device_name);
David Woodhouseed403562014-03-07 23:15:42 +0000776 continue;
777 }
778 dmar_acpi_insert_dev_scope(andd->device_number, adev);
779 }
David Woodhouseed403562014-03-07 23:15:42 +0000780 }
781 return 0;
782}
783
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700784int __init dmar_dev_scope_init(void)
785{
Jiang Liu2e455282014-02-19 14:07:36 +0800786 struct pci_dev *dev = NULL;
787 struct dmar_pci_notify_info *info;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700788
Jiang Liu2e455282014-02-19 14:07:36 +0800789 if (dmar_dev_scope_status != 1)
790 return dmar_dev_scope_status;
Suresh Siddhac2c72862011-08-23 17:05:19 -0700791
Jiang Liu2e455282014-02-19 14:07:36 +0800792 if (list_empty(&dmar_drhd_units)) {
793 dmar_dev_scope_status = -ENODEV;
794 } else {
795 dmar_dev_scope_status = 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700796
David Woodhouse63b42622014-03-28 11:28:40 +0000797 dmar_acpi_dev_scope_init();
798
Jiang Liu2e455282014-02-19 14:07:36 +0800799 for_each_pci_dev(dev) {
800 if (dev->is_virtfn)
801 continue;
802
803 info = dmar_alloc_pci_notify_info(dev,
804 BUS_NOTIFY_ADD_DEVICE);
805 if (!info) {
806 return dmar_dev_scope_status;
807 } else {
808 dmar_pci_bus_add_dev(info);
809 dmar_free_pci_notify_info(info);
810 }
811 }
812
813 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700814 }
815
Jiang Liu2e455282014-02-19 14:07:36 +0800816 return dmar_dev_scope_status;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700817}
818
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700819
820int __init dmar_table_init(void)
821{
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700822 static int dmar_table_initialized;
Fenghua Yu093f87d2007-11-21 15:07:14 -0800823 int ret;
824
Jiang Liucc053012014-01-06 14:18:24 +0800825 if (dmar_table_initialized == 0) {
826 ret = parse_dmar_table();
827 if (ret < 0) {
828 if (ret != -ENODEV)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200829 pr_info("Parse DMAR table failure.\n");
Jiang Liucc053012014-01-06 14:18:24 +0800830 } else if (list_empty(&dmar_drhd_units)) {
831 pr_info("No DMAR devices found\n");
832 ret = -ENODEV;
833 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700834
Jiang Liucc053012014-01-06 14:18:24 +0800835 if (ret < 0)
836 dmar_table_initialized = ret;
837 else
838 dmar_table_initialized = 1;
Fenghua Yu093f87d2007-11-21 15:07:14 -0800839 }
840
Jiang Liucc053012014-01-06 14:18:24 +0800841 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700842}
843
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100844static void warn_invalid_dmar(u64 addr, const char *message)
845{
Hans de Goede5145afc2020-03-09 15:01:37 +0100846 pr_warn_once(FW_BUG
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100847 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
848 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
849 addr, message,
850 dmi_get_system_info(DMI_BIOS_VENDOR),
851 dmi_get_system_info(DMI_BIOS_VERSION),
852 dmi_get_system_info(DMI_PRODUCT_VERSION));
Hans de Goede5145afc2020-03-09 15:01:37 +0100853 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
Ben Hutchings3a8663e2010-04-03 19:37:23 +0100854}
David Woodhouse6ecbf012009-12-02 09:20:27 +0000855
Jiang Liuc2a0b532014-11-09 22:47:56 +0800856static int __ref
857dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
David Woodhouse86cf8982009-11-09 22:15:15 +0000858{
David Woodhouse86cf8982009-11-09 22:15:15 +0000859 struct acpi_dmar_hardware_unit *drhd;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800860 void __iomem *addr;
861 u64 cap, ecap;
David Woodhouse86cf8982009-11-09 22:15:15 +0000862
Jiang Liuc2a0b532014-11-09 22:47:56 +0800863 drhd = (void *)entry;
864 if (!drhd->address) {
865 warn_invalid_dmar(0, "");
866 return -EINVAL;
David Woodhouse86cf8982009-11-09 22:15:15 +0000867 }
Chris Wright2c992202009-12-02 09:17:13 +0000868
Jiang Liu6b197242014-11-09 22:47:58 +0800869 if (arg)
870 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
871 else
872 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800873 if (!addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200874 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800875 return -EINVAL;
876 }
Jiang Liu6b197242014-11-09 22:47:58 +0800877
Jiang Liuc2a0b532014-11-09 22:47:56 +0800878 cap = dmar_readq(addr + DMAR_CAP_REG);
879 ecap = dmar_readq(addr + DMAR_ECAP_REG);
Jiang Liu6b197242014-11-09 22:47:58 +0800880
881 if (arg)
882 iounmap(addr);
883 else
884 early_iounmap(addr, VTD_PAGE_SIZE);
Jiang Liuc2a0b532014-11-09 22:47:56 +0800885
886 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
887 warn_invalid_dmar(drhd->address, " returns all ones");
888 return -EINVAL;
889 }
890
Chris Wright2c992202009-12-02 09:17:13 +0000891 return 0;
David Woodhouse86cf8982009-11-09 22:15:15 +0000892}
893
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400894int __init detect_intel_iommu(void)
Suresh Siddha2ae21012008-07-10 11:16:43 -0700895{
896 int ret;
Jiang Liuc2a0b532014-11-09 22:47:56 +0800897 struct dmar_res_callback validate_drhd_cb = {
898 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
899 .ignore_unhandled = true,
900 };
Suresh Siddha2ae21012008-07-10 11:16:43 -0700901
Jiang Liu3a5670e2014-02-19 14:07:33 +0800902 down_write(&dmar_global_lock);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700903 ret = dmar_table_detect();
David Woodhouse86cf8982009-11-09 22:15:15 +0000904 if (ret)
Jiang Liuc2a0b532014-11-09 22:47:56 +0800905 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
906 &validate_drhd_cb);
907 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
908 iommu_detected = 1;
909 /* Make sure ACS will be enabled */
910 pci_request_acs();
911 }
Suresh Siddhaf5d1b972011-08-23 17:05:22 -0700912
FUJITA Tomonori9d5ce732009-11-10 19:46:16 +0900913#ifdef CONFIG_X86
Jiang Liuc2a0b532014-11-09 22:47:56 +0800914 if (ret)
915 x86_init.iommu.iommu_init = intel_iommu_init;
FUJITA Tomonori9d5ce732009-11-10 19:46:16 +0900916#endif
Jiang Liuc2a0b532014-11-09 22:47:56 +0800917
Jiang Liub707cb02014-01-06 14:18:26 +0800918 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700919 dmar_tbl = NULL;
Jiang Liu3a5670e2014-02-19 14:07:33 +0800920 up_write(&dmar_global_lock);
Konrad Rzeszutek Wilk480125b2010-08-26 13:57:57 -0400921
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -0400922 return ret ? 1 : -ENODEV;
Suresh Siddha2ae21012008-07-10 11:16:43 -0700923}
924
925
Donald Dutile6f5cf522012-06-04 17:29:02 -0400926static void unmap_iommu(struct intel_iommu *iommu)
927{
928 iounmap(iommu->reg);
929 release_mem_region(iommu->reg_phys, iommu->reg_size);
930}
931
932/**
933 * map_iommu: map the iommu's registers
934 * @iommu: the iommu to map
935 * @phys_addr: the physical address of the base resgister
Donald Dutilee9071b02012-06-08 17:13:11 -0400936 *
Donald Dutile6f5cf522012-06-04 17:29:02 -0400937 * Memory map the iommu's registers. Start w/ a single page, and
Donald Dutilee9071b02012-06-08 17:13:11 -0400938 * possibly expand if that turns out to be insufficent.
Donald Dutile6f5cf522012-06-04 17:29:02 -0400939 */
940static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
941{
942 int map_size, err=0;
943
944 iommu->reg_phys = phys_addr;
945 iommu->reg_size = VTD_PAGE_SIZE;
946
947 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200948 pr_err("Can't reserve memory\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400949 err = -EBUSY;
950 goto out;
951 }
952
953 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
954 if (!iommu->reg) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200955 pr_err("Can't map the region\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400956 err = -ENOMEM;
957 goto release;
958 }
959
960 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
961 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
962
963 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
964 err = -EINVAL;
965 warn_invalid_dmar(phys_addr, " returns all ones");
966 goto unmap;
967 }
968
969 /* the registers might be more than one page */
970 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
971 cap_max_fault_reg_offset(iommu->cap));
972 map_size = VTD_PAGE_ALIGN(map_size);
973 if (map_size > iommu->reg_size) {
974 iounmap(iommu->reg);
975 release_mem_region(iommu->reg_phys, iommu->reg_size);
976 iommu->reg_size = map_size;
977 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
978 iommu->name)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200979 pr_err("Can't reserve memory\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400980 err = -EBUSY;
981 goto out;
982 }
983 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
984 if (!iommu->reg) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200985 pr_err("Can't map the region\n");
Donald Dutile6f5cf522012-06-04 17:29:02 -0400986 err = -ENOMEM;
987 goto release;
988 }
989 }
990 err = 0;
991 goto out;
992
993unmap:
994 iounmap(iommu->reg);
995release:
996 release_mem_region(iommu->reg_phys, iommu->reg_size);
997out:
998 return err;
999}
1000
Jiang Liu78d8e702014-11-09 22:47:57 +08001001static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1002{
1003 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1004 DMAR_UNITS_SUPPORTED);
1005 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1006 iommu->seq_id = -1;
1007 } else {
1008 set_bit(iommu->seq_id, dmar_seq_ids);
1009 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1010 }
1011
1012 return iommu->seq_id;
1013}
1014
1015static void dmar_free_seq_id(struct intel_iommu *iommu)
1016{
1017 if (iommu->seq_id >= 0) {
1018 clear_bit(iommu->seq_id, dmar_seq_ids);
1019 iommu->seq_id = -1;
1020 }
1021}
1022
Jiang Liu694835d2014-01-06 14:18:16 +08001023static int alloc_iommu(struct dmar_drhd_unit *drhd)
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001024{
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001025 struct intel_iommu *iommu;
Takao Indoh3a93c842013-04-23 17:35:03 +09001026 u32 ver, sts;
Joerg Roedel43f73922009-01-03 23:56:27 +01001027 int agaw = 0;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001028 int msagaw = 0;
Donald Dutile6f5cf522012-06-04 17:29:02 -04001029 int err;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001030
David Woodhouse6ecbf012009-12-02 09:20:27 +00001031 if (!drhd->reg_base_addr) {
Ben Hutchings3a8663e2010-04-03 19:37:23 +01001032 warn_invalid_dmar(0, "");
David Woodhouse6ecbf012009-12-02 09:20:27 +00001033 return -EINVAL;
1034 }
1035
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001036 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1037 if (!iommu)
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001038 return -ENOMEM;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07001039
Jiang Liu78d8e702014-11-09 22:47:57 +08001040 if (dmar_alloc_seq_id(iommu) < 0) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001041 pr_err("Failed to allocate seq_id\n");
Jiang Liu78d8e702014-11-09 22:47:57 +08001042 err = -ENOSPC;
1043 goto error;
1044 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001045
Donald Dutile6f5cf522012-06-04 17:29:02 -04001046 err = map_iommu(iommu, drhd->reg_base_addr);
1047 if (err) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001048 pr_err("Failed to map %s\n", iommu->name);
Jiang Liu78d8e702014-11-09 22:47:57 +08001049 goto error_free_seq_id;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001050 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001051
Donald Dutile6f5cf522012-06-04 17:29:02 -04001052 err = -EINVAL;
Weidong Han1b573682008-12-08 15:34:06 +08001053 agaw = iommu_calculate_agaw(iommu);
1054 if (agaw < 0) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001055 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1056 iommu->seq_id);
David Woodhouse08155652009-08-04 09:17:20 +01001057 goto err_unmap;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001058 }
1059 msagaw = iommu_calculate_max_sagaw(iommu);
1060 if (msagaw < 0) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001061 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
Weidong Han1b573682008-12-08 15:34:06 +08001062 iommu->seq_id);
David Woodhouse08155652009-08-04 09:17:20 +01001063 goto err_unmap;
Weidong Han1b573682008-12-08 15:34:06 +08001064 }
1065 iommu->agaw = agaw;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001066 iommu->msagaw = msagaw;
David Woodhouse67ccac42014-03-09 13:49:45 -07001067 iommu->segment = drhd->segment;
Weidong Han1b573682008-12-08 15:34:06 +08001068
Suresh Siddhaee34b322009-10-02 11:01:21 -07001069 iommu->node = -1;
1070
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001071 ver = readl(iommu->reg + DMAR_VER_REG);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001072 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1073 iommu->name,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001074 (unsigned long long)drhd->reg_base_addr,
1075 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1076 (unsigned long long)iommu->cap,
1077 (unsigned long long)iommu->ecap);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001078
Takao Indoh3a93c842013-04-23 17:35:03 +09001079 /* Reflect status in gcmd */
1080 sts = readl(iommu->reg + DMAR_GSTS_REG);
1081 if (sts & DMA_GSTS_IRES)
1082 iommu->gcmd |= DMA_GCMD_IRE;
1083 if (sts & DMA_GSTS_TES)
1084 iommu->gcmd |= DMA_GCMD_TE;
1085 if (sts & DMA_GSTS_QIES)
1086 iommu->gcmd |= DMA_GCMD_QIE;
1087
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001088 raw_spin_lock_init(&iommu->register_lock);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001089
Joerg Roedelbc847452016-01-07 12:16:51 +01001090 if (intel_iommu_enabled) {
Alex Williamsona5459cf2014-06-12 16:12:31 -06001091 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1092 intel_iommu_groups,
Kees Cook2439d4a2015-07-24 16:27:57 -07001093 "%s", iommu->name);
Alex Williamsona5459cf2014-06-12 16:12:31 -06001094
Joerg Roedelbc847452016-01-07 12:16:51 +01001095 if (IS_ERR(iommu->iommu_dev)) {
1096 err = PTR_ERR(iommu->iommu_dev);
1097 goto err_unmap;
1098 }
Nicholas Krause59203372016-01-04 18:27:57 -05001099 }
1100
Joerg Roedelbc847452016-01-07 12:16:51 +01001101 drhd->iommu = iommu;
1102
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001103 return 0;
David Woodhouse08155652009-08-04 09:17:20 +01001104
Jiang Liu78d8e702014-11-09 22:47:57 +08001105err_unmap:
Donald Dutile6f5cf522012-06-04 17:29:02 -04001106 unmap_iommu(iommu);
Jiang Liu78d8e702014-11-09 22:47:57 +08001107error_free_seq_id:
1108 dmar_free_seq_id(iommu);
1109error:
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001110 kfree(iommu);
Donald Dutile6f5cf522012-06-04 17:29:02 -04001111 return err;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001112}
1113
Jiang Liua868e6b2014-01-06 14:18:20 +08001114static void free_iommu(struct intel_iommu *iommu)
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001115{
Alex Williamsona5459cf2014-06-12 16:12:31 -06001116 iommu_device_destroy(iommu->iommu_dev);
1117
Jiang Liua868e6b2014-01-06 14:18:20 +08001118 if (iommu->irq) {
David Woodhouse12082252015-10-07 15:37:03 +01001119 if (iommu->pr_irq) {
1120 free_irq(iommu->pr_irq, iommu);
1121 dmar_free_hwirq(iommu->pr_irq);
1122 iommu->pr_irq = 0;
1123 }
Jiang Liua868e6b2014-01-06 14:18:20 +08001124 free_irq(iommu->irq, iommu);
Thomas Gleixnera553b142014-05-07 15:44:11 +00001125 dmar_free_hwirq(iommu->irq);
Jiang Liu34742db2015-04-13 14:11:41 +08001126 iommu->irq = 0;
Jiang Liua868e6b2014-01-06 14:18:20 +08001127 }
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001128
Jiang Liua84da702014-01-06 14:18:23 +08001129 if (iommu->qi) {
1130 free_page((unsigned long)iommu->qi->desc);
1131 kfree(iommu->qi->desc_status);
1132 kfree(iommu->qi);
1133 }
1134
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001135 if (iommu->reg)
Donald Dutile6f5cf522012-06-04 17:29:02 -04001136 unmap_iommu(iommu);
1137
Jiang Liu78d8e702014-11-09 22:47:57 +08001138 dmar_free_seq_id(iommu);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001139 kfree(iommu);
1140}
Suresh Siddhafe962e92008-07-10 11:16:42 -07001141
1142/*
1143 * Reclaim all the submitted descriptors which have completed its work.
1144 */
1145static inline void reclaim_free_desc(struct q_inval *qi)
1146{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001147 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1148 qi->desc_status[qi->free_tail] == QI_ABORT) {
Suresh Siddhafe962e92008-07-10 11:16:42 -07001149 qi->desc_status[qi->free_tail] = QI_FREE;
1150 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1151 qi->free_cnt++;
1152 }
1153}
1154
Yu Zhao704126a2009-01-04 16:28:52 +08001155static int qi_check_fault(struct intel_iommu *iommu, int index)
1156{
1157 u32 fault;
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001158 int head, tail;
Yu Zhao704126a2009-01-04 16:28:52 +08001159 struct q_inval *qi = iommu->qi;
1160 int wait_index = (index + 1) % QI_LENGTH;
1161
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001162 if (qi->desc_status[wait_index] == QI_ABORT)
1163 return -EAGAIN;
1164
Yu Zhao704126a2009-01-04 16:28:52 +08001165 fault = readl(iommu->reg + DMAR_FSTS_REG);
1166
1167 /*
1168 * If IQE happens, the head points to the descriptor associated
1169 * with the error. No new descriptors are fetched until the IQE
1170 * is cleared.
1171 */
1172 if (fault & DMA_FSTS_IQE) {
1173 head = readl(iommu->reg + DMAR_IQH_REG);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001174 if ((head >> DMAR_IQ_SHIFT) == index) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001175 pr_err("VT-d detected invalid descriptor: "
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001176 "low=%llx, high=%llx\n",
1177 (unsigned long long)qi->desc[index].low,
1178 (unsigned long long)qi->desc[index].high);
Yu Zhao704126a2009-01-04 16:28:52 +08001179 memcpy(&qi->desc[index], &qi->desc[wait_index],
1180 sizeof(struct qi_desc));
Yu Zhao704126a2009-01-04 16:28:52 +08001181 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1182 return -EINVAL;
1183 }
1184 }
1185
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001186 /*
1187 * If ITE happens, all pending wait_desc commands are aborted.
1188 * No new descriptors are fetched until the ITE is cleared.
1189 */
1190 if (fault & DMA_FSTS_ITE) {
1191 head = readl(iommu->reg + DMAR_IQH_REG);
1192 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1193 head |= 1;
1194 tail = readl(iommu->reg + DMAR_IQT_REG);
1195 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1196
1197 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1198
1199 do {
1200 if (qi->desc_status[head] == QI_IN_USE)
1201 qi->desc_status[head] = QI_ABORT;
1202 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1203 } while (head != tail);
1204
1205 if (qi->desc_status[wait_index] == QI_ABORT)
1206 return -EAGAIN;
1207 }
1208
1209 if (fault & DMA_FSTS_ICE)
1210 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1211
Yu Zhao704126a2009-01-04 16:28:52 +08001212 return 0;
1213}
1214
Suresh Siddhafe962e92008-07-10 11:16:42 -07001215/*
1216 * Submit the queued invalidation descriptor to the remapping
1217 * hardware unit and wait for its completion.
1218 */
Yu Zhao704126a2009-01-04 16:28:52 +08001219int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
Suresh Siddhafe962e92008-07-10 11:16:42 -07001220{
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001221 int rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001222 struct q_inval *qi = iommu->qi;
1223 struct qi_desc *hw, wait_desc;
1224 int wait_index, index;
1225 unsigned long flags;
1226
1227 if (!qi)
Yu Zhao704126a2009-01-04 16:28:52 +08001228 return 0;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001229
1230 hw = qi->desc;
1231
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001232restart:
1233 rc = 0;
1234
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001235 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001236 while (qi->free_cnt < 3) {
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001237 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001238 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001239 raw_spin_lock_irqsave(&qi->q_lock, flags);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001240 }
1241
1242 index = qi->free_head;
1243 wait_index = (index + 1) % QI_LENGTH;
1244
1245 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1246
1247 hw[index] = *desc;
1248
Yu Zhao704126a2009-01-04 16:28:52 +08001249 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1250 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001251 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1252
1253 hw[wait_index] = wait_desc;
1254
Suresh Siddhafe962e92008-07-10 11:16:42 -07001255 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1256 qi->free_cnt -= 2;
1257
Suresh Siddhafe962e92008-07-10 11:16:42 -07001258 /*
1259 * update the HW tail register indicating the presence of
1260 * new descriptors.
1261 */
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001262 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001263
1264 while (qi->desc_status[wait_index] != QI_DONE) {
Suresh Siddhaf05810c2008-10-16 16:31:54 -07001265 /*
1266 * We will leave the interrupts disabled, to prevent interrupt
1267 * context to queue another cmd while a cmd is already submitted
1268 * and waiting for completion on this cpu. This is to avoid
1269 * a deadlock where the interrupt context can wait indefinitely
1270 * for free slots in the queue.
1271 */
Yu Zhao704126a2009-01-04 16:28:52 +08001272 rc = qi_check_fault(iommu, index);
1273 if (rc)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001274 break;
Yu Zhao704126a2009-01-04 16:28:52 +08001275
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001276 raw_spin_unlock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001277 cpu_relax();
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001278 raw_spin_lock(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001279 }
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001280
1281 qi->desc_status[index] = QI_DONE;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001282
1283 reclaim_free_desc(qi);
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001284 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
Yu Zhao704126a2009-01-04 16:28:52 +08001285
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001286 if (rc == -EAGAIN)
1287 goto restart;
1288
Yu Zhao704126a2009-01-04 16:28:52 +08001289 return rc;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001290}
1291
1292/*
1293 * Flush the global interrupt entry cache.
1294 */
1295void qi_global_iec(struct intel_iommu *iommu)
1296{
1297 struct qi_desc desc;
1298
1299 desc.low = QI_IEC_TYPE;
1300 desc.high = 0;
1301
Yu Zhao704126a2009-01-04 16:28:52 +08001302 /* should never fail */
Suresh Siddhafe962e92008-07-10 11:16:42 -07001303 qi_submit_sync(&desc, iommu);
1304}
1305
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001306void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1307 u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001308{
Youquan Song3481f212008-10-16 16:31:55 -07001309 struct qi_desc desc;
1310
Youquan Song3481f212008-10-16 16:31:55 -07001311 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1312 | QI_CC_GRAN(type) | QI_CC_TYPE;
1313 desc.high = 0;
1314
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001315 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001316}
1317
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001318void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1319 unsigned int size_order, u64 type)
Youquan Song3481f212008-10-16 16:31:55 -07001320{
1321 u8 dw = 0, dr = 0;
1322
1323 struct qi_desc desc;
1324 int ih = 0;
1325
Youquan Song3481f212008-10-16 16:31:55 -07001326 if (cap_write_drain(iommu->cap))
1327 dw = 1;
1328
1329 if (cap_read_drain(iommu->cap))
1330 dr = 1;
1331
1332 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1333 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1334 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1335 | QI_IOTLB_AM(size_order);
1336
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001337 qi_submit_sync(&desc, iommu);
Youquan Song3481f212008-10-16 16:31:55 -07001338}
1339
Jacob Panb68377c2018-06-07 09:57:00 -07001340void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1341 u16 qdep, u64 addr, unsigned mask)
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001342{
1343 struct qi_desc desc;
1344
1345 if (mask) {
1346 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1347 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1348 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1349 } else
1350 desc.high = QI_DEV_IOTLB_ADDR(addr);
1351
1352 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1353 qdep = 0;
1354
1355 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
Jacob Panb68377c2018-06-07 09:57:00 -07001356 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
Yu Zhao6ba6c3a2009-05-18 13:51:35 +08001357
1358 qi_submit_sync(&desc, iommu);
1359}
1360
Suresh Siddhafe962e92008-07-10 11:16:42 -07001361/*
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001362 * Disable Queued Invalidation interface.
1363 */
1364void dmar_disable_qi(struct intel_iommu *iommu)
1365{
1366 unsigned long flags;
1367 u32 sts;
1368 cycles_t start_time = get_cycles();
1369
1370 if (!ecap_qis(iommu->ecap))
1371 return;
1372
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001373 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001374
CQ Tangfda3bec2016-01-13 21:15:03 +00001375 sts = readl(iommu->reg + DMAR_GSTS_REG);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001376 if (!(sts & DMA_GSTS_QIES))
1377 goto end;
1378
1379 /*
1380 * Give a chance to HW to complete the pending invalidation requests.
1381 */
1382 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1383 readl(iommu->reg + DMAR_IQH_REG)) &&
1384 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1385 cpu_relax();
1386
1387 iommu->gcmd &= ~DMA_GCMD_QIE;
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001388 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1389
1390 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1391 !(sts & DMA_GSTS_QIES), sts);
1392end:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001393 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Suresh Siddhaeba67e52009-03-16 17:04:56 -07001394}
1395
1396/*
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001397 * Enable queued invalidation.
1398 */
1399static void __dmar_enable_qi(struct intel_iommu *iommu)
1400{
David Woodhousec416daa2009-05-10 20:30:58 +01001401 u32 sts;
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001402 unsigned long flags;
1403 struct q_inval *qi = iommu->qi;
1404
1405 qi->free_head = qi->free_tail = 0;
1406 qi->free_cnt = QI_LENGTH;
1407
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001408 raw_spin_lock_irqsave(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001409
1410 /* write zero to the tail reg */
1411 writel(0, iommu->reg + DMAR_IQT_REG);
1412
1413 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1414
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001415 iommu->gcmd |= DMA_GCMD_QIE;
David Woodhousec416daa2009-05-10 20:30:58 +01001416 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001417
1418 /* Make sure hardware complete it */
1419 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1420
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001421 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001422}
1423
1424/*
Suresh Siddhafe962e92008-07-10 11:16:42 -07001425 * Enable Queued Invalidation interface. This is a must to support
1426 * interrupt-remapping. Also used by DMA-remapping, which replaces
1427 * register based IOTLB invalidation.
1428 */
1429int dmar_enable_qi(struct intel_iommu *iommu)
1430{
Suresh Siddhafe962e92008-07-10 11:16:42 -07001431 struct q_inval *qi;
Suresh Siddha751cafe2009-10-02 11:01:22 -07001432 struct page *desc_page;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001433
1434 if (!ecap_qis(iommu->ecap))
1435 return -ENOENT;
1436
1437 /*
1438 * queued invalidation is already setup and enabled.
1439 */
1440 if (iommu->qi)
1441 return 0;
1442
Suresh Siddhafa4b57c2009-03-16 17:05:05 -07001443 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001444 if (!iommu->qi)
1445 return -ENOMEM;
1446
1447 qi = iommu->qi;
1448
Suresh Siddha751cafe2009-10-02 11:01:22 -07001449
1450 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1451 if (!desc_page) {
Suresh Siddhafe962e92008-07-10 11:16:42 -07001452 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001453 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001454 return -ENOMEM;
1455 }
1456
Suresh Siddha751cafe2009-10-02 11:01:22 -07001457 qi->desc = page_address(desc_page);
1458
Hannes Reinecke37a40712013-02-06 09:50:10 +01001459 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001460 if (!qi->desc_status) {
1461 free_page((unsigned long) qi->desc);
1462 kfree(qi);
Jiang Liub707cb02014-01-06 14:18:26 +08001463 iommu->qi = NULL;
Suresh Siddhafe962e92008-07-10 11:16:42 -07001464 return -ENOMEM;
1465 }
1466
Thomas Gleixner3b8f4042011-07-19 17:02:07 +02001467 raw_spin_lock_init(&qi->q_lock);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001468
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001469 __dmar_enable_qi(iommu);
Suresh Siddhafe962e92008-07-10 11:16:42 -07001470
1471 return 0;
1472}
Suresh Siddha0ac24912009-03-16 17:04:54 -07001473
1474/* iommu interrupt handling. Most stuff are MSI-like. */
1475
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001476enum faulttype {
1477 DMA_REMAP,
1478 INTR_REMAP,
1479 UNKNOWN,
1480};
1481
1482static const char *dma_remap_fault_reasons[] =
Suresh Siddha0ac24912009-03-16 17:04:54 -07001483{
1484 "Software",
1485 "Present bit in root entry is clear",
1486 "Present bit in context entry is clear",
1487 "Invalid context entry",
1488 "Access beyond MGAW",
1489 "PTE Write access is not set",
1490 "PTE Read access is not set",
1491 "Next page table ptr is invalid",
1492 "Root table address invalid",
1493 "Context table ptr is invalid",
1494 "non-zero reserved fields in RTP",
1495 "non-zero reserved fields in CTP",
1496 "non-zero reserved fields in PTE",
Li, Zhen-Hua4ecccd92013-03-06 10:43:17 +08001497 "PCE for translation request specifies blocking",
Suresh Siddha0ac24912009-03-16 17:04:54 -07001498};
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001499
Suresh Siddha95a02e92012-03-30 11:47:07 -07001500static const char *irq_remap_fault_reasons[] =
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001501{
1502 "Detected reserved fields in the decoded interrupt-remapped request",
1503 "Interrupt index exceeded the interrupt-remapping table size",
1504 "Present field in the IRTE entry is clear",
1505 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1506 "Detected reserved fields in the IRTE entry",
1507 "Blocked a compatibility format interrupt request",
1508 "Blocked an interrupt request due to source-id verification failure",
1509};
1510
Rashika Kheria21004dc2013-12-18 12:01:46 +05301511static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001512{
Dan Carpenterfefe1ed2012-05-13 20:09:38 +03001513 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1514 ARRAY_SIZE(irq_remap_fault_reasons))) {
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001515 *fault_type = INTR_REMAP;
Suresh Siddha95a02e92012-03-30 11:47:07 -07001516 return irq_remap_fault_reasons[fault_reason - 0x20];
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001517 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1518 *fault_type = DMA_REMAP;
1519 return dma_remap_fault_reasons[fault_reason];
1520 } else {
1521 *fault_type = UNKNOWN;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001522 return "Unknown";
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001523 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001524}
1525
David Woodhouse12082252015-10-07 15:37:03 +01001526
1527static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1528{
1529 if (iommu->irq == irq)
1530 return DMAR_FECTL_REG;
1531 else if (iommu->pr_irq == irq)
1532 return DMAR_PECTL_REG;
1533 else
1534 BUG();
1535}
1536
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001537void dmar_msi_unmask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001538{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001539 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001540 int reg = dmar_msi_reg(iommu, data->irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001541 unsigned long flag;
1542
1543 /* unmask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001544 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001545 writel(0, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001546 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001547 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001548 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001549}
1550
Thomas Gleixner5c2837f2010-09-28 17:15:11 +02001551void dmar_msi_mask(struct irq_data *data)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001552{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001553 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
David Woodhouse12082252015-10-07 15:37:03 +01001554 int reg = dmar_msi_reg(iommu, data->irq);
1555 unsigned long flag;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001556
1557 /* mask it */
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001558 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001559 writel(DMA_FECTL_IM, iommu->reg + reg);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001560 /* Read a reg to force flush the post write */
David Woodhouse12082252015-10-07 15:37:03 +01001561 readl(iommu->reg + reg);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001562 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001563}
1564
1565void dmar_msi_write(int irq, struct msi_msg *msg)
1566{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001567 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001568 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001569 unsigned long flag;
1570
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001571 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001572 writel(msg->data, iommu->reg + reg + 4);
1573 writel(msg->address_lo, iommu->reg + reg + 8);
1574 writel(msg->address_hi, iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001575 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001576}
1577
1578void dmar_msi_read(int irq, struct msi_msg *msg)
1579{
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001580 struct intel_iommu *iommu = irq_get_handler_data(irq);
David Woodhouse12082252015-10-07 15:37:03 +01001581 int reg = dmar_msi_reg(iommu, irq);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001582 unsigned long flag;
1583
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001584 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse12082252015-10-07 15:37:03 +01001585 msg->data = readl(iommu->reg + reg + 4);
1586 msg->address_lo = readl(iommu->reg + reg + 8);
1587 msg->address_hi = readl(iommu->reg + reg + 12);
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001588 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001589}
1590
1591static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1592 u8 fault_reason, u16 source_id, unsigned long long addr)
1593{
1594 const char *reason;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001595 int fault_type;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001596
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001597 reason = dmar_get_fault_reason(fault_reason, &fault_type);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001598
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001599 if (fault_type == INTR_REMAP)
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001600 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1601 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001602 PCI_FUNC(source_id & 0xFF), addr >> 48,
1603 fault_reason, reason);
1604 else
Alex Williamsona0fe14d2016-03-17 14:12:31 -06001605 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1606 type ? "DMA Read" : "DMA Write",
1607 source_id >> 8, PCI_SLOT(source_id & 0xFF),
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001608 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001609 return 0;
1610}
1611
1612#define PRIMARY_FAULT_REG_LEN (16)
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001613irqreturn_t dmar_fault(int irq, void *dev_id)
Suresh Siddha0ac24912009-03-16 17:04:54 -07001614{
1615 struct intel_iommu *iommu = dev_id;
1616 int reg, fault_index;
1617 u32 fault_status;
1618 unsigned long flag;
Alex Williamsonc43fce42016-03-17 14:12:25 -06001619 bool ratelimited;
1620 static DEFINE_RATELIMIT_STATE(rs,
1621 DEFAULT_RATELIMIT_INTERVAL,
1622 DEFAULT_RATELIMIT_BURST);
1623
1624 /* Disable printing, simply clear the fault when ratelimited */
1625 ratelimited = !__ratelimit(&rs);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001626
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001627 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001628 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
Alex Williamsonc43fce42016-03-17 14:12:25 -06001629 if (fault_status && !ratelimited)
Donald Dutilebf947fcb2012-06-04 17:29:01 -04001630 pr_err("DRHD: handling fault status reg %x\n", fault_status);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001631
1632 /* TBD: ignore advanced fault log currently */
1633 if (!(fault_status & DMA_FSTS_PPF))
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001634 goto unlock_exit;
Suresh Siddha0ac24912009-03-16 17:04:54 -07001635
1636 fault_index = dma_fsts_fault_record_index(fault_status);
1637 reg = cap_fault_reg_offset(iommu->cap);
1638 while (1) {
1639 u8 fault_reason;
1640 u16 source_id;
1641 u64 guest_addr;
1642 int type;
1643 u32 data;
1644
1645 /* highest 32 bits */
1646 data = readl(iommu->reg + reg +
1647 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1648 if (!(data & DMA_FRCD_F))
1649 break;
1650
Alex Williamsonc43fce42016-03-17 14:12:25 -06001651 if (!ratelimited) {
1652 fault_reason = dma_frcd_fault_reason(data);
1653 type = dma_frcd_type(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001654
Alex Williamsonc43fce42016-03-17 14:12:25 -06001655 data = readl(iommu->reg + reg +
1656 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1657 source_id = dma_frcd_source_id(data);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001658
Alex Williamsonc43fce42016-03-17 14:12:25 -06001659 guest_addr = dmar_readq(iommu->reg + reg +
1660 fault_index * PRIMARY_FAULT_REG_LEN);
1661 guest_addr = dma_frcd_page_addr(guest_addr);
1662 }
1663
Suresh Siddha0ac24912009-03-16 17:04:54 -07001664 /* clear the fault */
1665 writel(DMA_FRCD_F, iommu->reg + reg +
1666 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1667
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001668 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001669
Alex Williamsonc43fce42016-03-17 14:12:25 -06001670 if (!ratelimited)
1671 dmar_fault_do_one(iommu, type, fault_reason,
1672 source_id, guest_addr);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001673
1674 fault_index++;
Troy Heber8211a7b2009-08-19 15:26:11 -06001675 if (fault_index >= cap_num_fault_regs(iommu->cap))
Suresh Siddha0ac24912009-03-16 17:04:54 -07001676 fault_index = 0;
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001677 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001678 }
Suresh Siddha0ac24912009-03-16 17:04:54 -07001679
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001680 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1681
1682unlock_exit:
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001683 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001684 return IRQ_HANDLED;
1685}
1686
1687int dmar_set_interrupt(struct intel_iommu *iommu)
1688{
1689 int irq, ret;
1690
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001691 /*
1692 * Check if the fault interrupt is already initialized.
1693 */
1694 if (iommu->irq)
1695 return 0;
1696
Jiang Liu34742db2015-04-13 14:11:41 +08001697 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1698 if (irq > 0) {
1699 iommu->irq = irq;
1700 } else {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001701 pr_err("No free IRQ vectors\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001702 return -EINVAL;
1703 }
1704
Thomas Gleixner477694e2011-07-19 16:25:42 +02001705 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
Suresh Siddha0ac24912009-03-16 17:04:54 -07001706 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001707 pr_err("Can't request irq\n");
Suresh Siddha0ac24912009-03-16 17:04:54 -07001708 return ret;
1709}
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001710
1711int __init enable_drhd_fault_handling(void)
1712{
1713 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08001714 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001715
1716 /*
1717 * Enable fault control interrupt.
1718 */
Jiang Liu7c919772014-01-06 14:18:18 +08001719 for_each_iommu(iommu, drhd) {
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001720 u32 fault_status;
Jiang Liu7c919772014-01-06 14:18:18 +08001721 int ret = dmar_set_interrupt(iommu);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001722
1723 if (ret) {
Donald Dutilee9071b02012-06-08 17:13:11 -04001724 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001725 (unsigned long long)drhd->reg_base_addr, ret);
1726 return -1;
1727 }
Suresh Siddha7f99d942010-11-30 22:22:29 -08001728
1729 /*
1730 * Clear any previous faults.
1731 */
1732 dmar_fault(iommu->irq, iommu);
Li, Zhen-Huabd5cdad2013-03-25 16:20:52 +08001733 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1734 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001735 }
1736
1737 return 0;
1738}
Fenghua Yueb4a52b2009-03-27 14:22:43 -07001739
1740/*
1741 * Re-enable Queued Invalidation interface.
1742 */
1743int dmar_reenable_qi(struct intel_iommu *iommu)
1744{
1745 if (!ecap_qis(iommu->ecap))
1746 return -ENOENT;
1747
1748 if (!iommu->qi)
1749 return -ENOENT;
1750
1751 /*
1752 * First disable queued invalidation.
1753 */
1754 dmar_disable_qi(iommu);
1755 /*
1756 * Then enable queued invalidation again. Since there is no pending
1757 * invalidation requests now, it's safe to re-enable queued
1758 * invalidation.
1759 */
1760 __dmar_enable_qi(iommu);
1761
1762 return 0;
1763}
Youquan Song074835f2009-09-09 12:05:39 -04001764
1765/*
1766 * Check interrupt remapping support in DMAR table description.
1767 */
Luck, Tony0b8973a2009-12-16 22:59:29 +00001768int __init dmar_ir_support(void)
Youquan Song074835f2009-09-09 12:05:39 -04001769{
1770 struct acpi_table_dmar *dmar;
1771 dmar = (struct acpi_table_dmar *)dmar_tbl;
Arnaud Patard4f506e02010-03-25 18:02:58 +00001772 if (!dmar)
1773 return 0;
Youquan Song074835f2009-09-09 12:05:39 -04001774 return dmar->flags & 0x1;
1775}
Jiang Liu694835d2014-01-06 14:18:16 +08001776
Jiang Liu6b197242014-11-09 22:47:58 +08001777/* Check whether DMAR units are in use */
1778static inline bool dmar_in_use(void)
1779{
1780 return irq_remapping_enabled || intel_iommu_enabled;
1781}
1782
Jiang Liua868e6b2014-01-06 14:18:20 +08001783static int __init dmar_free_unused_resources(void)
1784{
1785 struct dmar_drhd_unit *dmaru, *dmaru_n;
1786
Jiang Liu6b197242014-11-09 22:47:58 +08001787 if (dmar_in_use())
Jiang Liua868e6b2014-01-06 14:18:20 +08001788 return 0;
1789
Jiang Liu2e455282014-02-19 14:07:36 +08001790 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1791 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
Jiang Liu59ce0512014-02-19 14:07:35 +08001792
Jiang Liu3a5670e2014-02-19 14:07:33 +08001793 down_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001794 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1795 list_del(&dmaru->list);
1796 dmar_free_drhd(dmaru);
1797 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08001798 up_write(&dmar_global_lock);
Jiang Liua868e6b2014-01-06 14:18:20 +08001799
1800 return 0;
1801}
1802
1803late_initcall(dmar_free_unused_resources);
Konrad Rzeszutek Wilk4db77ff2010-08-26 13:58:04 -04001804IOMMU_INIT_POST(detect_intel_iommu);
Jiang Liu6b197242014-11-09 22:47:58 +08001805
1806/*
1807 * DMAR Hotplug Support
1808 * For more details, please refer to Intel(R) Virtualization Technology
1809 * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8
1810 * "Remapping Hardware Unit Hot Plug".
1811 */
1812static u8 dmar_hp_uuid[] = {
1813 /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1814 /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1815};
1816
1817/*
1818 * Currently there's only one revision and BIOS will not check the revision id,
1819 * so use 0 for safety.
1820 */
1821#define DMAR_DSM_REV_ID 0
1822#define DMAR_DSM_FUNC_DRHD 1
1823#define DMAR_DSM_FUNC_ATSR 2
1824#define DMAR_DSM_FUNC_RHSA 3
1825
1826static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1827{
1828 return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1829}
1830
1831static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1832 dmar_res_handler_t handler, void *arg)
1833{
1834 int ret = -ENODEV;
1835 union acpi_object *obj;
1836 struct acpi_dmar_header *start;
1837 struct dmar_res_callback callback;
1838 static int res_type[] = {
1839 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1840 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1841 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1842 };
1843
1844 if (!dmar_detect_dsm(handle, func))
1845 return 0;
1846
1847 obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1848 func, NULL, ACPI_TYPE_BUFFER);
1849 if (!obj)
1850 return -ENODEV;
1851
1852 memset(&callback, 0, sizeof(callback));
1853 callback.cb[res_type[func]] = handler;
1854 callback.arg[res_type[func]] = arg;
1855 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1856 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1857
1858 ACPI_FREE(obj);
1859
1860 return ret;
1861}
1862
1863static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1864{
1865 int ret;
1866 struct dmar_drhd_unit *dmaru;
1867
1868 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1869 if (!dmaru)
1870 return -ENODEV;
1871
1872 ret = dmar_ir_hotplug(dmaru, true);
1873 if (ret == 0)
1874 ret = dmar_iommu_hotplug(dmaru, true);
1875
1876 return ret;
1877}
1878
1879static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1880{
1881 int i, ret;
1882 struct device *dev;
1883 struct dmar_drhd_unit *dmaru;
1884
1885 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1886 if (!dmaru)
1887 return 0;
1888
1889 /*
1890 * All PCI devices managed by this unit should have been destroyed.
1891 */
Linus Torvalds194dc872016-07-27 20:03:31 -07001892 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08001893 for_each_active_dev_scope(dmaru->devices,
1894 dmaru->devices_cnt, i, dev)
1895 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07001896 }
Jiang Liu6b197242014-11-09 22:47:58 +08001897
1898 ret = dmar_ir_hotplug(dmaru, false);
1899 if (ret == 0)
1900 ret = dmar_iommu_hotplug(dmaru, false);
1901
1902 return ret;
1903}
1904
1905static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1906{
1907 struct dmar_drhd_unit *dmaru;
1908
1909 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1910 if (dmaru) {
1911 list_del_rcu(&dmaru->list);
1912 synchronize_rcu();
1913 dmar_free_drhd(dmaru);
1914 }
1915
1916 return 0;
1917}
1918
1919static int dmar_hotplug_insert(acpi_handle handle)
1920{
1921 int ret;
1922 int drhd_count = 0;
1923
1924 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1925 &dmar_validate_one_drhd, (void *)1);
1926 if (ret)
1927 goto out;
1928
1929 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1930 &dmar_parse_one_drhd, (void *)&drhd_count);
1931 if (ret == 0 && drhd_count == 0) {
1932 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1933 goto out;
1934 } else if (ret) {
1935 goto release_drhd;
1936 }
1937
1938 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1939 &dmar_parse_one_rhsa, NULL);
1940 if (ret)
1941 goto release_drhd;
1942
1943 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1944 &dmar_parse_one_atsr, NULL);
1945 if (ret)
1946 goto release_atsr;
1947
1948 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1949 &dmar_hp_add_drhd, NULL);
1950 if (!ret)
1951 return 0;
1952
1953 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1954 &dmar_hp_remove_drhd, NULL);
1955release_atsr:
1956 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1957 &dmar_release_one_atsr, NULL);
1958release_drhd:
1959 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1960 &dmar_hp_release_drhd, NULL);
1961out:
1962 return ret;
1963}
1964
1965static int dmar_hotplug_remove(acpi_handle handle)
1966{
1967 int ret;
1968
1969 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1970 &dmar_check_one_atsr, NULL);
1971 if (ret)
1972 return ret;
1973
1974 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1975 &dmar_hp_remove_drhd, NULL);
1976 if (ret == 0) {
1977 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1978 &dmar_release_one_atsr, NULL));
1979 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1980 &dmar_hp_release_drhd, NULL));
1981 } else {
1982 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1983 &dmar_hp_add_drhd, NULL);
1984 }
1985
1986 return ret;
1987}
1988
Jiang Liud35165a2014-11-09 22:47:59 +08001989static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1990 void *context, void **retval)
1991{
1992 acpi_handle *phdl = retval;
1993
1994 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1995 *phdl = handle;
1996 return AE_CTRL_TERMINATE;
1997 }
1998
1999 return AE_OK;
2000}
2001
Jiang Liu6b197242014-11-09 22:47:58 +08002002static int dmar_device_hotplug(acpi_handle handle, bool insert)
2003{
2004 int ret;
Jiang Liud35165a2014-11-09 22:47:59 +08002005 acpi_handle tmp = NULL;
2006 acpi_status status;
Jiang Liu6b197242014-11-09 22:47:58 +08002007
2008 if (!dmar_in_use())
2009 return 0;
2010
Jiang Liud35165a2014-11-09 22:47:59 +08002011 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2012 tmp = handle;
2013 } else {
2014 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2015 ACPI_UINT32_MAX,
2016 dmar_get_dsm_handle,
2017 NULL, NULL, &tmp);
2018 if (ACPI_FAILURE(status)) {
2019 pr_warn("Failed to locate _DSM method.\n");
2020 return -ENXIO;
2021 }
2022 }
2023 if (tmp == NULL)
Jiang Liu6b197242014-11-09 22:47:58 +08002024 return 0;
2025
2026 down_write(&dmar_global_lock);
2027 if (insert)
Jiang Liud35165a2014-11-09 22:47:59 +08002028 ret = dmar_hotplug_insert(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002029 else
Jiang Liud35165a2014-11-09 22:47:59 +08002030 ret = dmar_hotplug_remove(tmp);
Jiang Liu6b197242014-11-09 22:47:58 +08002031 up_write(&dmar_global_lock);
2032
2033 return ret;
2034}
2035
2036int dmar_device_add(acpi_handle handle)
2037{
2038 return dmar_device_hotplug(handle, true);
2039}
2040
2041int dmar_device_remove(acpi_handle handle)
2042{
2043 return dmar_device_hotplug(handle, false);
2044}