blob: 1eb743c700e9a5c256de79e83cbd6203a056fe09 [file] [log] [blame]
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
David Woodhouseea8ea462014-03-05 17:09:32 +00002 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
David Woodhouseea8ea462014-03-05 17:09:32 +000013 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020018 * Joerg Roedel <jroedel@suse.de>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070019 */
20
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020021#define pr_fmt(fmt) "DMAR: " fmt
22
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070023#include <linux/init.h>
24#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080025#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040026#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070030#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080035#include <linux/memory.h>
mark gross5e0d2a62008-03-04 15:22:08 -080036#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030037#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010038#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030039#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010040#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070041#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100042#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020043#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080044#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070045#include <linux/dma-contiguous.h>
Joerg Roedel091d42e2015-06-12 11:56:10 +020046#include <linux/crash_dump.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070047#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070048#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090049#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070050
Joerg Roedel078e1ee2012-09-26 12:44:43 +020051#include "irq_remapping.h"
52
Fenghua Yu5b6985c2008-10-16 18:02:32 -070053#define ROOT_SIZE VTD_PAGE_SIZE
54#define CONTEXT_SIZE VTD_PAGE_SIZE
55
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070056#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
David Woodhouse18436af2015-03-25 15:05:47 +000057#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070058#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070059#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070060
61#define IOAPIC_RANGE_START (0xfee00000)
62#define IOAPIC_RANGE_END (0xfeefffff)
63#define IOVA_START_ADDR (0x1000)
64
65#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
66
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070067#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080068#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070069
David Woodhouse2ebe3152009-09-19 07:34:04 -070070#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
72
73/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
76 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070078
Robin Murphy1b722502015-01-12 17:51:15 +000079/* IO virtual address start page frame number */
80#define IOVA_START_PFN (1)
81
Mark McLoughlinf27be032008-11-20 15:49:43 +000082#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070083#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070084#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080085
Andrew Mortondf08cdc2010-09-22 13:05:11 -070086/* page table handling */
87#define LEVEL_STRIDE (9)
88#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
89
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020090/*
91 * This bitmap is used to advertise the page sizes our hardware support
92 * to the IOMMU core, which will then use this information to split
93 * physically contiguous memory regions it is mapping into page sizes
94 * that we support.
95 *
96 * Traditionally the IOMMU core just handed us the mappings directly,
97 * after making sure the size is an order of a 4KiB page and that the
98 * mapping has natural alignment.
99 *
100 * To retain this behavior, we currently advertise that we support
101 * all page sizes that are an order of 4KiB.
102 *
103 * If at some point we'd like to utilize the IOMMU core's new behavior,
104 * we could change this to advertise the real page sizes we support.
105 */
106#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
107
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700108static inline int agaw_to_level(int agaw)
109{
110 return agaw + 2;
111}
112
113static inline int agaw_to_width(int agaw)
114{
Jiang Liu5c645b32014-01-06 14:18:12 +0800115 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700116}
117
118static inline int width_to_agaw(int width)
119{
Jiang Liu5c645b32014-01-06 14:18:12 +0800120 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700121}
122
123static inline unsigned int level_to_offset_bits(int level)
124{
125 return (level - 1) * LEVEL_STRIDE;
126}
127
128static inline int pfn_level_offset(unsigned long pfn, int level)
129{
130 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131}
132
133static inline unsigned long level_mask(int level)
134{
135 return -1UL << level_to_offset_bits(level);
136}
137
138static inline unsigned long level_size(int level)
139{
140 return 1UL << level_to_offset_bits(level);
141}
142
143static inline unsigned long align_to_level(unsigned long pfn, int level)
144{
145 return (pfn + level_size(level) - 1) & level_mask(level);
146}
David Woodhousefd18de52009-05-10 23:57:41 +0100147
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100148static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
149{
Jiang Liu5c645b32014-01-06 14:18:12 +0800150 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100151}
152
David Woodhousedd4e8312009-06-27 16:21:20 +0100153/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154 are never going to work. */
155static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
156{
157 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158}
159
160static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
161{
162 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
163}
164static inline unsigned long page_to_dma_pfn(struct page *pg)
165{
166 return mm_to_dma_pfn(page_to_pfn(pg));
167}
168static inline unsigned long virt_to_dma_pfn(void *p)
169{
170 return page_to_dma_pfn(virt_to_page(p));
171}
172
Weidong Hand9630fe2008-12-08 11:06:32 +0800173/* global iommu list, set NULL for ignored DMAR units */
174static struct intel_iommu **g_iommus;
175
David Woodhousee0fc7e02009-09-30 09:12:17 -0700176static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000177static int rwbf_quirk;
178
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000179/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700180 * set to 1 to panic kernel if can't successfully enable VT-d
181 * (used when kernel is launched w/ TXT)
182 */
183static int force_on = 0;
184
185/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000186 * 0: Present
187 * 1-11: Reserved
188 * 12-63: Context Ptr (12 - (haw-1))
189 * 64-127: Reserved
190 */
191struct root_entry {
David Woodhouse03ecc322015-02-13 14:35:21 +0000192 u64 lo;
193 u64 hi;
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000194};
195#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000196
Joerg Roedel091d42e2015-06-12 11:56:10 +0200197/*
198 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
199 * if marked present.
200 */
201static phys_addr_t root_entry_lctp(struct root_entry *re)
202{
203 if (!(re->lo & 1))
204 return 0;
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000205
Joerg Roedel091d42e2015-06-12 11:56:10 +0200206 return re->lo & VTD_PAGE_MASK;
207}
208
209/*
210 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
211 * if marked present.
212 */
213static phys_addr_t root_entry_uctp(struct root_entry *re)
214{
215 if (!(re->hi & 1))
216 return 0;
217
218 return re->hi & VTD_PAGE_MASK;
219}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000220/*
221 * low 64 bits:
222 * 0: present
223 * 1: fault processing disable
224 * 2-3: translation type
225 * 12-63: address space root
226 * high 64 bits:
227 * 0-2: address width
228 * 3-6: aval
229 * 8-23: domain id
230 */
231struct context_entry {
232 u64 lo;
233 u64 hi;
234};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000235
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000236static inline bool context_present(struct context_entry *context)
237{
238 return (context->lo & 1);
239}
240static inline void context_set_present(struct context_entry *context)
241{
242 context->lo |= 1;
243}
244
245static inline void context_set_fault_enable(struct context_entry *context)
246{
247 context->lo &= (((u64)-1) << 2) | 1;
248}
249
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000250static inline void context_set_translation_type(struct context_entry *context,
251 unsigned long value)
252{
253 context->lo &= (((u64)-1) << 4) | 3;
254 context->lo |= (value & 3) << 2;
255}
256
257static inline void context_set_address_root(struct context_entry *context,
258 unsigned long value)
259{
Li, Zhen-Hua1a2262f2014-11-05 15:30:19 +0800260 context->lo &= ~VTD_PAGE_MASK;
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000261 context->lo |= value & VTD_PAGE_MASK;
262}
263
264static inline void context_set_address_width(struct context_entry *context,
265 unsigned long value)
266{
267 context->hi |= value & 7;
268}
269
270static inline void context_set_domain_id(struct context_entry *context,
271 unsigned long value)
272{
273 context->hi |= (value & ((1 << 16) - 1)) << 8;
274}
275
276static inline void context_clear_entry(struct context_entry *context)
277{
278 context->lo = 0;
279 context->hi = 0;
280}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000281
Mark McLoughlin622ba122008-11-20 15:49:46 +0000282/*
283 * 0: readable
284 * 1: writable
285 * 2-6: reserved
286 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800287 * 8-10: available
288 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000289 * 12-63: Host physcial address
290 */
291struct dma_pte {
292 u64 val;
293};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000294
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000295static inline void dma_clear_pte(struct dma_pte *pte)
296{
297 pte->val = 0;
298}
299
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000300static inline u64 dma_pte_addr(struct dma_pte *pte)
301{
David Woodhousec85994e2009-07-01 19:21:24 +0100302#ifdef CONFIG_64BIT
303 return pte->val & VTD_PAGE_MASK;
304#else
305 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100306 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100307#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000308}
309
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000310static inline bool dma_pte_present(struct dma_pte *pte)
311{
312 return (pte->val & 3) != 0;
313}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000314
Allen Kay4399c8b2011-10-14 12:32:46 -0700315static inline bool dma_pte_superpage(struct dma_pte *pte)
316{
Joerg Roedelc3c75eb2014-07-04 11:19:10 +0200317 return (pte->val & DMA_PTE_LARGE_PAGE);
Allen Kay4399c8b2011-10-14 12:32:46 -0700318}
319
David Woodhouse75e6bf92009-07-02 11:21:16 +0100320static inline int first_pte_in_page(struct dma_pte *pte)
321{
322 return !((unsigned long)pte & ~VTD_PAGE_MASK);
323}
324
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700325/*
326 * This domain is a statically identity mapping domain.
327 * 1. This domain creats a static 1:1 mapping to all usable memory.
328 * 2. It maps to each iommu if successful.
329 * 3. Each iommu mapps to this domain if successful.
330 */
David Woodhouse19943b02009-08-04 16:19:20 +0100331static struct dmar_domain *si_domain;
332static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700333
Weidong Han1ce28fe2008-12-08 16:35:39 +0800334/* domain represents a virtual machine, more than one devices
335 * across iommus may be owned in one domain, e.g. kvm guest.
336 */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800337#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
Weidong Han1ce28fe2008-12-08 16:35:39 +0800338
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700339/* si_domain contains mulitple devices */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800340#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700341
Mark McLoughlin99126f72008-11-20 15:49:47 +0000342struct dmar_domain {
343 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700344 int nid; /* node id */
Jiang Liu78d8e702014-11-09 22:47:57 +0800345 DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
Mike Travis1b198bb2012-03-05 15:05:16 -0800346 /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000347
Joerg Roedel00a77de2015-03-26 13:43:08 +0100348 struct list_head devices; /* all devices' list */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000349 struct iova_domain iovad; /* iova's that belong to this domain */
350
351 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000352 int gaw; /* max guest address width */
353
354 /* adjusted guest address width, 0 is level 2 30-bit */
355 int agaw;
356
Weidong Han3b5410e2008-12-08 09:17:15 +0800357 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800358
359 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800360 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800361 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100362 int iommu_superpage;/* Level of superpages supported:
363 0 == 4KiB (no superpages), 1 == 2MiB,
364 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800365 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800366 u64 max_addr; /* maximum mapped address */
Joerg Roedel00a77de2015-03-26 13:43:08 +0100367
368 struct iommu_domain domain; /* generic domain data structure for
369 iommu core */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000370};
371
Mark McLoughlina647dac2008-11-20 15:49:48 +0000372/* PCI domain-device relationship */
373struct device_domain_info {
374 struct list_head link; /* link to domain siblings */
375 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100376 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000377 u8 devfn; /* PCI devfn number */
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000378 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800379 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000380 struct dmar_domain *domain; /* pointer to domain */
381};
382
Jiang Liub94e4112014-02-19 14:07:25 +0800383struct dmar_rmrr_unit {
384 struct list_head list; /* list of rmrr units */
385 struct acpi_dmar_header *hdr; /* ACPI header */
386 u64 base_address; /* reserved base address*/
387 u64 end_address; /* reserved end address */
David Woodhouse832bd852014-03-07 15:08:36 +0000388 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800389 int devices_cnt; /* target device count */
390};
391
392struct dmar_atsr_unit {
393 struct list_head list; /* list of ATSR units */
394 struct acpi_dmar_header *hdr; /* ACPI header */
David Woodhouse832bd852014-03-07 15:08:36 +0000395 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800396 int devices_cnt; /* target device count */
397 u8 include_all:1; /* include all ports */
398};
399
400static LIST_HEAD(dmar_atsr_units);
401static LIST_HEAD(dmar_rmrr_units);
402
403#define for_each_rmrr_units(rmrr) \
404 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
405
mark gross5e0d2a62008-03-04 15:22:08 -0800406static void flush_unmaps_timeout(unsigned long data);
407
Jiang Liub707cb02014-01-06 14:18:26 +0800408static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800409
mark gross80b20dd2008-04-18 13:53:58 -0700410#define HIGH_WATER_MARK 250
411struct deferred_flush_tables {
412 int next;
413 struct iova *iova[HIGH_WATER_MARK];
414 struct dmar_domain *domain[HIGH_WATER_MARK];
David Woodhouseea8ea462014-03-05 17:09:32 +0000415 struct page *freelist[HIGH_WATER_MARK];
mark gross80b20dd2008-04-18 13:53:58 -0700416};
417
418static struct deferred_flush_tables *deferred_flush;
419
mark gross5e0d2a62008-03-04 15:22:08 -0800420/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800421static int g_num_of_iommus;
422
423static DEFINE_SPINLOCK(async_umap_flush_lock);
424static LIST_HEAD(unmaps_to_do);
425
426static int timer_on;
427static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800428
Jiang Liu92d03cc2014-02-19 14:07:28 +0800429static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700430static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800431static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -0700432 struct device *dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800433static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000434 struct device *dev);
Jiang Liu2a46ddf2014-07-11 14:19:30 +0800435static int domain_detach_iommu(struct dmar_domain *domain,
436 struct intel_iommu *iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700437
Suresh Siddhad3f13812011-08-23 17:05:25 -0700438#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800439int dmar_disabled = 0;
440#else
441int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700442#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800443
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200444int intel_iommu_enabled = 0;
445EXPORT_SYMBOL_GPL(intel_iommu_enabled);
446
David Woodhouse2d9e6672010-06-15 10:57:57 +0100447static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700448static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800449static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100450static int intel_iommu_superpage = 1;
David Woodhousec83b2f22015-06-12 10:15:49 +0100451static int intel_iommu_ecs = 1;
452
453/* We only actually use ECS when PASID support (on the new bit 40)
454 * is also advertised. Some early implementations — the ones with
455 * PASID support on bit 28 — have issues even when we *only* use
456 * extended root/context tables. */
457#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
458 ecap_pasid(iommu->ecap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700459
David Woodhousec0771df2011-10-14 20:59:46 +0100460int intel_iommu_gfx_mapped;
461EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
462
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700463#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
464static DEFINE_SPINLOCK(device_domain_lock);
465static LIST_HEAD(device_domain_list);
466
Thierry Redingb22f6432014-06-27 09:03:12 +0200467static const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100468
Joerg Roedel4158c2e2015-06-12 10:14:02 +0200469static bool translation_pre_enabled(struct intel_iommu *iommu)
470{
471 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
472}
473
Joerg Roedel091d42e2015-06-12 11:56:10 +0200474static void clear_translation_pre_enabled(struct intel_iommu *iommu)
475{
476 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
477}
478
Joerg Roedel4158c2e2015-06-12 10:14:02 +0200479static void init_translation_status(struct intel_iommu *iommu)
480{
481 u32 gsts;
482
483 gsts = readl(iommu->reg + DMAR_GSTS_REG);
484 if (gsts & DMA_GSTS_TES)
485 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
486}
487
Joerg Roedel00a77de2015-03-26 13:43:08 +0100488/* Convert generic 'struct iommu_domain to private struct dmar_domain */
489static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
490{
491 return container_of(dom, struct dmar_domain, domain);
492}
493
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700494static int __init intel_iommu_setup(char *str)
495{
496 if (!str)
497 return -EINVAL;
498 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800499 if (!strncmp(str, "on", 2)) {
500 dmar_disabled = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200501 pr_info("IOMMU enabled\n");
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800502 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700503 dmar_disabled = 1;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200504 pr_info("IOMMU disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700505 } else if (!strncmp(str, "igfx_off", 8)) {
506 dmar_map_gfx = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200507 pr_info("Disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700508 } else if (!strncmp(str, "forcedac", 8)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200509 pr_info("Forcing DAC for PCI devices\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700510 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800511 } else if (!strncmp(str, "strict", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200512 pr_info("Disable batched IOTLB flush\n");
mark gross5e0d2a62008-03-04 15:22:08 -0800513 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100514 } else if (!strncmp(str, "sp_off", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200515 pr_info("Disable supported super page\n");
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100516 intel_iommu_superpage = 0;
David Woodhousec83b2f22015-06-12 10:15:49 +0100517 } else if (!strncmp(str, "ecs_off", 7)) {
518 printk(KERN_INFO
519 "Intel-IOMMU: disable extended context table support\n");
520 intel_iommu_ecs = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700521 }
522
523 str += strcspn(str, ",");
524 while (*str == ',')
525 str++;
526 }
527 return 0;
528}
529__setup("intel_iommu=", intel_iommu_setup);
530
531static struct kmem_cache *iommu_domain_cache;
532static struct kmem_cache *iommu_devinfo_cache;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700533
Suresh Siddha4c923d42009-10-02 11:01:24 -0700534static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700535{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700536 struct page *page;
537 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700538
Suresh Siddha4c923d42009-10-02 11:01:24 -0700539 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
540 if (page)
541 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700542 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700543}
544
545static inline void free_pgtable_page(void *vaddr)
546{
547 free_page((unsigned long)vaddr);
548}
549
550static inline void *alloc_domain_mem(void)
551{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900552 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700553}
554
Kay, Allen M38717942008-09-09 18:37:29 +0300555static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700556{
557 kmem_cache_free(iommu_domain_cache, vaddr);
558}
559
560static inline void * alloc_devinfo_mem(void)
561{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900562 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700563}
564
565static inline void free_devinfo_mem(void *vaddr)
566{
567 kmem_cache_free(iommu_devinfo_cache, vaddr);
568}
569
Jiang Liuab8dfe22014-07-11 14:19:27 +0800570static inline int domain_type_is_vm(struct dmar_domain *domain)
571{
572 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
573}
574
575static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
576{
577 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
578 DOMAIN_FLAG_STATIC_IDENTITY);
579}
Weidong Han1b573682008-12-08 15:34:06 +0800580
Jiang Liu162d1b12014-07-11 14:19:35 +0800581static inline int domain_pfn_supported(struct dmar_domain *domain,
582 unsigned long pfn)
583{
584 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
585
586 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
587}
588
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700589static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800590{
591 unsigned long sagaw;
592 int agaw = -1;
593
594 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700595 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800596 agaw >= 0; agaw--) {
597 if (test_bit(agaw, &sagaw))
598 break;
599 }
600
601 return agaw;
602}
603
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700604/*
605 * Calculate max SAGAW for each iommu.
606 */
607int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
608{
609 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
610}
611
612/*
613 * calculate agaw for each iommu.
614 * "SAGAW" may be different across iommus, use a default agaw, and
615 * get a supported less agaw for iommus that don't support the default agaw.
616 */
617int iommu_calculate_agaw(struct intel_iommu *iommu)
618{
619 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
620}
621
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700622/* This functionin only returns single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800623static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
624{
625 int iommu_id;
626
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700627 /* si_domain and vm domain should not get here. */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800628 BUG_ON(domain_type_is_vm_or_si(domain));
Mike Travis1b198bb2012-03-05 15:05:16 -0800629 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800630 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
631 return NULL;
632
633 return g_iommus[iommu_id];
634}
635
Weidong Han8e6040972008-12-08 15:49:06 +0800636static void domain_update_iommu_coherency(struct dmar_domain *domain)
637{
David Woodhoused0501962014-03-11 17:10:29 -0700638 struct dmar_drhd_unit *drhd;
639 struct intel_iommu *iommu;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100640 bool found = false;
641 int i;
Weidong Han8e6040972008-12-08 15:49:06 +0800642
David Woodhoused0501962014-03-11 17:10:29 -0700643 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800644
Mike Travis1b198bb2012-03-05 15:05:16 -0800645 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Quentin Lambert2f119c72015-02-06 10:59:53 +0100646 found = true;
Weidong Han8e6040972008-12-08 15:49:06 +0800647 if (!ecap_coherent(g_iommus[i]->ecap)) {
648 domain->iommu_coherency = 0;
649 break;
650 }
Weidong Han8e6040972008-12-08 15:49:06 +0800651 }
David Woodhoused0501962014-03-11 17:10:29 -0700652 if (found)
653 return;
654
655 /* No hardware attached; use lowest common denominator */
656 rcu_read_lock();
657 for_each_active_iommu(iommu, drhd) {
658 if (!ecap_coherent(iommu->ecap)) {
659 domain->iommu_coherency = 0;
660 break;
661 }
662 }
663 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800664}
665
Jiang Liu161f6932014-07-11 14:19:37 +0800666static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100667{
Allen Kay8140a952011-10-14 12:32:17 -0700668 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800669 struct intel_iommu *iommu;
670 int ret = 1;
671
672 rcu_read_lock();
673 for_each_active_iommu(iommu, drhd) {
674 if (iommu != skip) {
675 if (!ecap_sc_support(iommu->ecap)) {
676 ret = 0;
677 break;
678 }
679 }
680 }
681 rcu_read_unlock();
682
683 return ret;
684}
685
686static int domain_update_iommu_superpage(struct intel_iommu *skip)
687{
688 struct dmar_drhd_unit *drhd;
689 struct intel_iommu *iommu;
Allen Kay8140a952011-10-14 12:32:17 -0700690 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100691
692 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800693 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100694 }
695
Allen Kay8140a952011-10-14 12:32:17 -0700696 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800697 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700698 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800699 if (iommu != skip) {
700 mask &= cap_super_page_val(iommu->cap);
701 if (!mask)
702 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100703 }
704 }
Jiang Liu0e242612014-02-19 14:07:34 +0800705 rcu_read_unlock();
706
Jiang Liu161f6932014-07-11 14:19:37 +0800707 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100708}
709
Sheng Yang58c610b2009-03-18 15:33:05 +0800710/* Some capabilities may be different across iommus */
711static void domain_update_iommu_cap(struct dmar_domain *domain)
712{
713 domain_update_iommu_coherency(domain);
Jiang Liu161f6932014-07-11 14:19:37 +0800714 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
715 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
Sheng Yang58c610b2009-03-18 15:33:05 +0800716}
717
David Woodhouse03ecc322015-02-13 14:35:21 +0000718static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
719 u8 bus, u8 devfn, int alloc)
720{
721 struct root_entry *root = &iommu->root_entry[bus];
722 struct context_entry *context;
723 u64 *entry;
724
David Woodhousec83b2f22015-06-12 10:15:49 +0100725 if (ecs_enabled(iommu)) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000726 if (devfn >= 0x80) {
727 devfn -= 0x80;
728 entry = &root->hi;
729 }
730 devfn *= 2;
731 }
732 entry = &root->lo;
733 if (*entry & 1)
734 context = phys_to_virt(*entry & VTD_PAGE_MASK);
735 else {
736 unsigned long phy_addr;
737 if (!alloc)
738 return NULL;
739
740 context = alloc_pgtable_page(iommu->node);
741 if (!context)
742 return NULL;
743
744 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
745 phy_addr = virt_to_phys((void *)context);
746 *entry = phy_addr | 1;
747 __iommu_flush_cache(iommu, entry, sizeof(*entry));
748 }
749 return &context[devfn];
750}
751
David Woodhouse4ed6a542015-05-11 14:59:20 +0100752static int iommu_dummy(struct device *dev)
753{
754 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
755}
756
David Woodhouse156baca2014-03-09 14:00:57 -0700757static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800758{
759 struct dmar_drhd_unit *drhd = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800760 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -0700761 struct device *tmp;
762 struct pci_dev *ptmp, *pdev = NULL;
Yijing Wangaa4d0662014-05-26 20:14:06 +0800763 u16 segment = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +0800764 int i;
765
David Woodhouse4ed6a542015-05-11 14:59:20 +0100766 if (iommu_dummy(dev))
767 return NULL;
768
David Woodhouse156baca2014-03-09 14:00:57 -0700769 if (dev_is_pci(dev)) {
770 pdev = to_pci_dev(dev);
771 segment = pci_domain_nr(pdev->bus);
Rafael J. Wysockica5b74d2015-03-16 23:49:08 +0100772 } else if (has_acpi_companion(dev))
David Woodhouse156baca2014-03-09 14:00:57 -0700773 dev = &ACPI_COMPANION(dev)->dev;
774
Jiang Liu0e242612014-02-19 14:07:34 +0800775 rcu_read_lock();
Jiang Liub683b232014-02-19 14:07:32 +0800776 for_each_active_iommu(iommu, drhd) {
David Woodhouse156baca2014-03-09 14:00:57 -0700777 if (pdev && segment != drhd->segment)
David Woodhouse276dbf992009-04-04 01:45:37 +0100778 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800779
Jiang Liub683b232014-02-19 14:07:32 +0800780 for_each_active_dev_scope(drhd->devices,
David Woodhouse156baca2014-03-09 14:00:57 -0700781 drhd->devices_cnt, i, tmp) {
782 if (tmp == dev) {
783 *bus = drhd->devices[i].bus;
784 *devfn = drhd->devices[i].devfn;
785 goto out;
786 }
787
788 if (!pdev || !dev_is_pci(tmp))
David Woodhouse832bd852014-03-07 15:08:36 +0000789 continue;
David Woodhouse156baca2014-03-09 14:00:57 -0700790
791 ptmp = to_pci_dev(tmp);
792 if (ptmp->subordinate &&
793 ptmp->subordinate->number <= pdev->bus->number &&
794 ptmp->subordinate->busn_res.end >= pdev->bus->number)
795 goto got_pdev;
David Woodhouse924b6232009-04-04 00:39:25 +0100796 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800797
David Woodhouse156baca2014-03-09 14:00:57 -0700798 if (pdev && drhd->include_all) {
799 got_pdev:
800 *bus = pdev->bus->number;
801 *devfn = pdev->devfn;
Jiang Liub683b232014-02-19 14:07:32 +0800802 goto out;
David Woodhouse156baca2014-03-09 14:00:57 -0700803 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800804 }
Jiang Liub683b232014-02-19 14:07:32 +0800805 iommu = NULL;
David Woodhouse156baca2014-03-09 14:00:57 -0700806 out:
Jiang Liu0e242612014-02-19 14:07:34 +0800807 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800808
Jiang Liub683b232014-02-19 14:07:32 +0800809 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800810}
811
Weidong Han5331fe62008-12-08 23:00:00 +0800812static void domain_flush_cache(struct dmar_domain *domain,
813 void *addr, int size)
814{
815 if (!domain->iommu_coherency)
816 clflush_cache_range(addr, size);
817}
818
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700819static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
820{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700821 struct context_entry *context;
David Woodhouse03ecc322015-02-13 14:35:21 +0000822 int ret = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700823 unsigned long flags;
824
825 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000826 context = iommu_context_addr(iommu, bus, devfn, 0);
827 if (context)
828 ret = context_present(context);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700829 spin_unlock_irqrestore(&iommu->lock, flags);
830 return ret;
831}
832
833static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
834{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700835 struct context_entry *context;
836 unsigned long flags;
837
838 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000839 context = iommu_context_addr(iommu, bus, devfn, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700840 if (context) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000841 context_clear_entry(context);
842 __iommu_flush_cache(iommu, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700843 }
844 spin_unlock_irqrestore(&iommu->lock, flags);
845}
846
847static void free_context_table(struct intel_iommu *iommu)
848{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700849 int i;
850 unsigned long flags;
851 struct context_entry *context;
852
853 spin_lock_irqsave(&iommu->lock, flags);
854 if (!iommu->root_entry) {
855 goto out;
856 }
857 for (i = 0; i < ROOT_ENTRY_NR; i++) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000858 context = iommu_context_addr(iommu, i, 0, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700859 if (context)
860 free_pgtable_page(context);
David Woodhouse03ecc322015-02-13 14:35:21 +0000861
David Woodhousec83b2f22015-06-12 10:15:49 +0100862 if (!ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +0000863 continue;
864
865 context = iommu_context_addr(iommu, i, 0x80, 0);
866 if (context)
867 free_pgtable_page(context);
868
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700869 }
870 free_pgtable_page(iommu->root_entry);
871 iommu->root_entry = NULL;
872out:
873 spin_unlock_irqrestore(&iommu->lock, flags);
874}
875
David Woodhouseb026fd22009-06-28 10:37:25 +0100876static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
David Woodhouse5cf0a762014-03-19 16:07:49 +0000877 unsigned long pfn, int *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700878{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700879 struct dma_pte *parent, *pte = NULL;
880 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700881 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700882
883 BUG_ON(!domain->pgd);
Julian Stecklinaf9423602013-10-09 10:03:52 +0200884
Jiang Liu162d1b12014-07-11 14:19:35 +0800885 if (!domain_pfn_supported(domain, pfn))
Julian Stecklinaf9423602013-10-09 10:03:52 +0200886 /* Address beyond IOMMU's addressing capabilities. */
887 return NULL;
888
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700889 parent = domain->pgd;
890
David Woodhouse5cf0a762014-03-19 16:07:49 +0000891 while (1) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700892 void *tmp_page;
893
David Woodhouseb026fd22009-06-28 10:37:25 +0100894 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700895 pte = &parent[offset];
David Woodhouse5cf0a762014-03-19 16:07:49 +0000896 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100897 break;
David Woodhouse5cf0a762014-03-19 16:07:49 +0000898 if (level == *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700899 break;
900
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000901 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100902 uint64_t pteval;
903
Suresh Siddha4c923d42009-10-02 11:01:24 -0700904 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700905
David Woodhouse206a73c12009-07-01 19:30:28 +0100906 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100908
David Woodhousec85994e2009-07-01 19:21:24 +0100909 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400910 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
Yijing Wangeffad4b2014-05-26 20:13:47 +0800911 if (cmpxchg64(&pte->val, 0ULL, pteval))
David Woodhousec85994e2009-07-01 19:21:24 +0100912 /* Someone else set it while we were thinking; use theirs. */
913 free_pgtable_page(tmp_page);
Yijing Wangeffad4b2014-05-26 20:13:47 +0800914 else
David Woodhousec85994e2009-07-01 19:21:24 +0100915 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700916 }
David Woodhouse5cf0a762014-03-19 16:07:49 +0000917 if (level == 1)
918 break;
919
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000920 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700921 level--;
922 }
923
David Woodhouse5cf0a762014-03-19 16:07:49 +0000924 if (!*target_level)
925 *target_level = level;
926
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700927 return pte;
928}
929
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100930
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700931/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100932static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
933 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100934 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700935{
936 struct dma_pte *parent, *pte = NULL;
937 int total = agaw_to_level(domain->agaw);
938 int offset;
939
940 parent = domain->pgd;
941 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100942 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700943 pte = &parent[offset];
944 if (level == total)
945 return pte;
946
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100947 if (!dma_pte_present(pte)) {
948 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700949 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100950 }
951
Yijing Wange16922a2014-05-20 20:37:51 +0800952 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100953 *large_page = total;
954 return pte;
955 }
956
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000957 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700958 total--;
959 }
960 return NULL;
961}
962
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700963/* clear last level pte, a tlb flush should be followed */
David Woodhouse5cf0a762014-03-19 16:07:49 +0000964static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100965 unsigned long start_pfn,
966 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700967{
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100968 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100969 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700970
Jiang Liu162d1b12014-07-11 14:19:35 +0800971 BUG_ON(!domain_pfn_supported(domain, start_pfn));
972 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -0700973 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100974
David Woodhouse04b18e62009-06-27 19:15:01 +0100975 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700976 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100977 large_page = 1;
978 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100979 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100980 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100981 continue;
982 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100983 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100984 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100985 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100986 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100987 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
988
David Woodhouse310a5ab2009-06-28 18:52:20 +0100989 domain_flush_cache(domain, first_pte,
990 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700991
992 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700993}
994
Alex Williamson3269ee02013-06-15 10:27:19 -0600995static void dma_pte_free_level(struct dmar_domain *domain, int level,
996 struct dma_pte *pte, unsigned long pfn,
997 unsigned long start_pfn, unsigned long last_pfn)
998{
999 pfn = max(start_pfn, pfn);
1000 pte = &pte[pfn_level_offset(pfn, level)];
1001
1002 do {
1003 unsigned long level_pfn;
1004 struct dma_pte *level_pte;
1005
1006 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1007 goto next;
1008
1009 level_pfn = pfn & level_mask(level - 1);
1010 level_pte = phys_to_virt(dma_pte_addr(pte));
1011
1012 if (level > 2)
1013 dma_pte_free_level(domain, level - 1, level_pte,
1014 level_pfn, start_pfn, last_pfn);
1015
1016 /* If range covers entire pagetable, free it */
1017 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -08001018 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -06001019 dma_clear_pte(pte);
1020 domain_flush_cache(domain, pte, sizeof(*pte));
1021 free_pgtable_page(level_pte);
1022 }
1023next:
1024 pfn += level_size(level);
1025 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1026}
1027
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001028/* free page table pages. last level pte should already be cleared */
1029static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +01001030 unsigned long start_pfn,
1031 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001032{
Jiang Liu162d1b12014-07-11 14:19:35 +08001033 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1034 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001035 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001036
Jiang Liud41a4ad2014-07-11 14:19:34 +08001037 dma_pte_clear_range(domain, start_pfn, last_pfn);
1038
David Woodhousef3a0a522009-06-30 03:40:07 +01001039 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -06001040 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1041 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +01001042
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001043 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +01001044 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001045 free_pgtable_page(domain->pgd);
1046 domain->pgd = NULL;
1047 }
1048}
1049
David Woodhouseea8ea462014-03-05 17:09:32 +00001050/* When a page at a given level is being unlinked from its parent, we don't
1051 need to *modify* it at all. All we need to do is make a list of all the
1052 pages which can be freed just as soon as we've flushed the IOTLB and we
1053 know the hardware page-walk will no longer touch them.
1054 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1055 be freed. */
1056static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1057 int level, struct dma_pte *pte,
1058 struct page *freelist)
1059{
1060 struct page *pg;
1061
1062 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1063 pg->freelist = freelist;
1064 freelist = pg;
1065
1066 if (level == 1)
1067 return freelist;
1068
Jiang Liuadeb2592014-04-09 10:20:39 +08001069 pte = page_address(pg);
1070 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001071 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1072 freelist = dma_pte_list_pagetables(domain, level - 1,
1073 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001074 pte++;
1075 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001076
1077 return freelist;
1078}
1079
1080static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1081 struct dma_pte *pte, unsigned long pfn,
1082 unsigned long start_pfn,
1083 unsigned long last_pfn,
1084 struct page *freelist)
1085{
1086 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1087
1088 pfn = max(start_pfn, pfn);
1089 pte = &pte[pfn_level_offset(pfn, level)];
1090
1091 do {
1092 unsigned long level_pfn;
1093
1094 if (!dma_pte_present(pte))
1095 goto next;
1096
1097 level_pfn = pfn & level_mask(level);
1098
1099 /* If range covers entire pagetable, free it */
1100 if (start_pfn <= level_pfn &&
1101 last_pfn >= level_pfn + level_size(level) - 1) {
1102 /* These suborbinate page tables are going away entirely. Don't
1103 bother to clear them; we're just going to *free* them. */
1104 if (level > 1 && !dma_pte_superpage(pte))
1105 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1106
1107 dma_clear_pte(pte);
1108 if (!first_pte)
1109 first_pte = pte;
1110 last_pte = pte;
1111 } else if (level > 1) {
1112 /* Recurse down into a level that isn't *entirely* obsolete */
1113 freelist = dma_pte_clear_level(domain, level - 1,
1114 phys_to_virt(dma_pte_addr(pte)),
1115 level_pfn, start_pfn, last_pfn,
1116 freelist);
1117 }
1118next:
1119 pfn += level_size(level);
1120 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1121
1122 if (first_pte)
1123 domain_flush_cache(domain, first_pte,
1124 (void *)++last_pte - (void *)first_pte);
1125
1126 return freelist;
1127}
1128
1129/* We can't just free the pages because the IOMMU may still be walking
1130 the page tables, and may have cached the intermediate levels. The
1131 pages can only be freed after the IOTLB flush has been done. */
1132struct page *domain_unmap(struct dmar_domain *domain,
1133 unsigned long start_pfn,
1134 unsigned long last_pfn)
1135{
David Woodhouseea8ea462014-03-05 17:09:32 +00001136 struct page *freelist = NULL;
1137
Jiang Liu162d1b12014-07-11 14:19:35 +08001138 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1139 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001140 BUG_ON(start_pfn > last_pfn);
1141
1142 /* we don't need lock here; nobody else touches the iova range */
1143 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1144 domain->pgd, 0, start_pfn, last_pfn, NULL);
1145
1146 /* free pgd */
1147 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1148 struct page *pgd_page = virt_to_page(domain->pgd);
1149 pgd_page->freelist = freelist;
1150 freelist = pgd_page;
1151
1152 domain->pgd = NULL;
1153 }
1154
1155 return freelist;
1156}
1157
1158void dma_free_pagelist(struct page *freelist)
1159{
1160 struct page *pg;
1161
1162 while ((pg = freelist)) {
1163 freelist = pg->freelist;
1164 free_pgtable_page(page_address(pg));
1165 }
1166}
1167
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001168/* iommu handling */
1169static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1170{
1171 struct root_entry *root;
1172 unsigned long flags;
1173
Suresh Siddha4c923d42009-10-02 11:01:24 -07001174 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001175 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001176 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001177 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001178 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001179 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001180
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001181 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001182
1183 spin_lock_irqsave(&iommu->lock, flags);
1184 iommu->root_entry = root;
1185 spin_unlock_irqrestore(&iommu->lock, flags);
1186
1187 return 0;
1188}
1189
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001190static void iommu_set_root_entry(struct intel_iommu *iommu)
1191{
David Woodhouse03ecc322015-02-13 14:35:21 +00001192 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001193 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001194 unsigned long flag;
1195
David Woodhouse03ecc322015-02-13 14:35:21 +00001196 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001197 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001198 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001199
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001200 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001201 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001202
David Woodhousec416daa2009-05-10 20:30:58 +01001203 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001204
1205 /* Make sure hardware complete it */
1206 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001207 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001208
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001209 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001210}
1211
1212static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1213{
1214 u32 val;
1215 unsigned long flag;
1216
David Woodhouse9af88142009-02-13 23:18:03 +00001217 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001218 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001219
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001220 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001221 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001222
1223 /* Make sure hardware complete it */
1224 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001225 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001226
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001227 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001228}
1229
1230/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001231static void __iommu_flush_context(struct intel_iommu *iommu,
1232 u16 did, u16 source_id, u8 function_mask,
1233 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001234{
1235 u64 val = 0;
1236 unsigned long flag;
1237
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001238 switch (type) {
1239 case DMA_CCMD_GLOBAL_INVL:
1240 val = DMA_CCMD_GLOBAL_INVL;
1241 break;
1242 case DMA_CCMD_DOMAIN_INVL:
1243 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1244 break;
1245 case DMA_CCMD_DEVICE_INVL:
1246 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1247 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1248 break;
1249 default:
1250 BUG();
1251 }
1252 val |= DMA_CCMD_ICC;
1253
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001254 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001255 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1256
1257 /* Make sure hardware complete it */
1258 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1259 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1260
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001261 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001262}
1263
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001265static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1266 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001267{
1268 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1269 u64 val = 0, val_iva = 0;
1270 unsigned long flag;
1271
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272 switch (type) {
1273 case DMA_TLB_GLOBAL_FLUSH:
1274 /* global flush doesn't need set IVA_REG */
1275 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1276 break;
1277 case DMA_TLB_DSI_FLUSH:
1278 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1279 break;
1280 case DMA_TLB_PSI_FLUSH:
1281 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001282 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283 val_iva = size_order | addr;
1284 break;
1285 default:
1286 BUG();
1287 }
1288 /* Note: set drain read/write */
1289#if 0
1290 /*
1291	 * This is probably only here to be extra safe; it looks like we
1292	 * can ignore it without any impact.
1293 */
1294 if (cap_read_drain(iommu->cap))
1295 val |= DMA_TLB_READ_DRAIN;
1296#endif
1297 if (cap_write_drain(iommu->cap))
1298 val |= DMA_TLB_WRITE_DRAIN;
1299
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001300 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001301 /* Note: Only uses first TLB reg currently */
1302 if (val_iva)
1303 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1304 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1305
1306	/* Make sure hardware completes it */
1307 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1308 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1309
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001310 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001311
1312 /* check IOTLB invalidation granularity */
1313 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001314 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001315 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001316 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001317 (unsigned long long)DMA_TLB_IIRG(type),
1318 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001319}
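/*
 * Worked example (illustrative values, not from the original source,
 * assuming the IVA/IOTLB register layout used above): a page-selective
 * flush of a 4-page region at IOVA 0x12340000 with IH clear passes
 * size_order = 2 and addr = 0x12340000, so val_iva = 0x12340000 | 2 =
 * 0x12340002 is written at tlb_offset and the IVT-tagged command goes
 * into the IOTLB register at tlb_offset + 8.
 */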
1320
David Woodhouse64ae8922014-03-09 12:52:30 -07001321static struct device_domain_info *
1322iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1323 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001324{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001325 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001326 unsigned long flags;
1327 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001328 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001329
1330 if (!ecap_dev_iotlb_support(iommu->ecap))
1331 return NULL;
1332
1333 if (!iommu->qi)
1334 return NULL;
1335
1336 spin_lock_irqsave(&device_domain_lock, flags);
1337 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001338 if (info->iommu == iommu && info->bus == bus &&
1339 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001340 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001341 break;
1342 }
1343 spin_unlock_irqrestore(&device_domain_lock, flags);
1344
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001345 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001346 return NULL;
1347
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001348 pdev = to_pci_dev(info->dev);
1349
1350 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001351 return NULL;
1352
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001353 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001354 return NULL;
1355
Yu Zhao93a23a72009-05-18 13:51:37 +08001356 return info;
1357}
1358
1359static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1360{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001361 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001362 return;
1363
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001364 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001365}
1366
1367static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1368{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001369 if (!info->dev || !dev_is_pci(info->dev) ||
1370 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001371 return;
1372
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001373 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001374}
1375
1376static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1377 u64 addr, unsigned mask)
1378{
1379 u16 sid, qdep;
1380 unsigned long flags;
1381 struct device_domain_info *info;
1382
1383 spin_lock_irqsave(&device_domain_lock, flags);
1384 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001385 struct pci_dev *pdev;
1386 if (!info->dev || !dev_is_pci(info->dev))
1387 continue;
1388
1389 pdev = to_pci_dev(info->dev);
1390 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001391 continue;
1392
1393 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001394 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001395 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1396 }
1397 spin_unlock_irqrestore(&device_domain_lock, flags);
1398}
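/*
 * Illustrative example (values made up): for a device at bus 0x03,
 * devfn 0x10 (slot 2, function 0), sid = 0x03 << 8 | 0x10 = 0x0310,
 * and qdep is whatever ATS invalidation queue depth the device
 * advertises via pci_ats_queue_depth().
 */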
1399
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001400static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001401 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001403 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001404 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001405
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406 BUG_ON(pages == 0);
1407
David Woodhouseea8ea462014-03-05 17:09:32 +00001408 if (ih)
1409 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001411	 * Fall back to a domain-selective flush if there is no PSI support
1412	 * or the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413	 * PSI requires the page size to be 2 ^ x, and the base address to be
1414	 * naturally aligned to that size.
1415 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001416 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1417 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001418 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001419 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001420 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001421 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001422
1423 /*
Nadav Amit82653632010-04-01 13:24:40 +03001424	 * In caching mode, changes of pages from non-present to present require
1425	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001426 */
Nadav Amit82653632010-04-01 13:24:40 +03001427 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001428 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429}
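/*
 * Worked example (illustrative): pages = 3 gives
 * mask = ilog2(__roundup_pow_of_two(3)) = ilog2(4) = 2, so a naturally
 * aligned 4-page window containing pfn is invalidated with PSI; if the
 * hardware's maximum address mask (cap_max_amask_val) is smaller than
 * that, the code above falls back to a domain-selective flush instead.
 */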
1430
mark grossf8bab732008-02-08 04:18:38 -08001431static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1432{
1433 u32 pmen;
1434 unsigned long flags;
1435
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001436 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001437 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1438 pmen &= ~DMA_PMEN_EPM;
1439 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1440
1441 /* wait for the protected region status bit to clear */
1442 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1443 readl, !(pmen & DMA_PMEN_PRS), pmen);
1444
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001445 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001446}
1447
Jiang Liu2a41cce2014-07-11 14:19:33 +08001448static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001449{
1450 u32 sts;
1451 unsigned long flags;
1452
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001453 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001454 iommu->gcmd |= DMA_GCMD_TE;
1455 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001456
1457	/* Make sure hardware completes it */
1458 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001459 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001460
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001461 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001462}
1463
Jiang Liu2a41cce2014-07-11 14:19:33 +08001464static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001465{
1466 u32 sts;
1467 unsigned long flag;
1468
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001469 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001470 iommu->gcmd &= ~DMA_GCMD_TE;
1471 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1472
1473	/* Make sure hardware completes it */
1474 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001475 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001476
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001477 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001478}
1479
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001480
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481static int iommu_init_domains(struct intel_iommu *iommu)
1482{
1483 unsigned long ndomains;
1484 unsigned long nlongs;
1485
1486 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001487 pr_debug("%s: Number of Domains supported <%ld>\n",
1488 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001489 nlongs = BITS_TO_LONGS(ndomains);
1490
Donald Dutile94a91b52009-08-20 16:51:34 -04001491 spin_lock_init(&iommu->lock);
1492
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001493	/* TBD: there might be 64K domains,
1494	 * so consider a different allocation scheme for future chips
1495 */
1496 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1497 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001498 pr_err("%s: Allocating domain id array failed\n",
1499 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500 return -ENOMEM;
1501 }
1502 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1503 GFP_KERNEL);
1504 if (!iommu->domains) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001505 pr_err("%s: Allocating domain array failed\n",
1506 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001507 kfree(iommu->domain_ids);
1508 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 return -ENOMEM;
1510 }
1511
1512 /*
1513	 * If caching mode is set, then invalid translations are tagged
1514	 * with domain id 0. Hence we need to pre-allocate it.
1515 */
1516 if (cap_caching_mode(iommu->cap))
1517 set_bit(0, iommu->domain_ids);
1518 return 0;
1519}
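/*
 * Illustrative sizing example (not from the original source): with
 * cap_ndoms(iommu->cap) == 256, nlongs = BITS_TO_LONGS(256) = 4 on a
 * 64-bit kernel, so domain_ids is a 256-bit bitmap and domains holds
 * 256 dmar_domain pointers.
 */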
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001520
Jiang Liuffebeb42014-11-09 22:48:02 +08001521static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522{
1523 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001524 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525
Donald Dutile94a91b52009-08-20 16:51:34 -04001526 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001527 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001528 /*
1529 * Domain id 0 is reserved for invalid translation
1530 * if hardware supports caching mode.
1531 */
1532 if (cap_caching_mode(iommu->cap) && i == 0)
1533 continue;
1534
Donald Dutile94a91b52009-08-20 16:51:34 -04001535 domain = iommu->domains[i];
1536 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001537 if (domain_detach_iommu(domain, iommu) == 0 &&
1538 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001539 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001540 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001541 }
1542
1543 if (iommu->gcmd & DMA_GCMD_TE)
1544 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001545}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546
Jiang Liuffebeb42014-11-09 22:48:02 +08001547static void free_dmar_iommu(struct intel_iommu *iommu)
1548{
1549 if ((iommu->domains) && (iommu->domain_ids)) {
1550 kfree(iommu->domains);
1551 kfree(iommu->domain_ids);
1552 iommu->domains = NULL;
1553 iommu->domain_ids = NULL;
1554 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555
Weidong Hand9630fe2008-12-08 11:06:32 +08001556 g_iommus[iommu->seq_id] = NULL;
1557
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 /* free context mapping */
1559 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560}
1561
Jiang Liuab8dfe22014-07-11 14:19:27 +08001562static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001563{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001564	/* domain id for virtual machine; it won't be set in the context entry */
1565 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001567
1568 domain = alloc_domain_mem();
1569 if (!domain)
1570 return NULL;
1571
Jiang Liuab8dfe22014-07-11 14:19:27 +08001572 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001573 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001574 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001575 spin_lock_init(&domain->iommu_lock);
1576 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001577 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001578 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579
1580 return domain;
1581}
1582
Jiang Liufb170fb2014-07-11 14:19:28 +08001583static int __iommu_attach_domain(struct dmar_domain *domain,
1584 struct intel_iommu *iommu)
1585{
1586 int num;
1587 unsigned long ndomains;
1588
1589 ndomains = cap_ndoms(iommu->cap);
1590 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1591 if (num < ndomains) {
1592 set_bit(num, iommu->domain_ids);
1593 iommu->domains[num] = domain;
1594 } else {
1595 num = -ENOSPC;
1596 }
1597
1598 return num;
1599}
1600
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001601static int iommu_attach_domain(struct dmar_domain *domain,
1602 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001603{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001604 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001605 unsigned long flags;
1606
Weidong Han8c11e792008-12-08 15:29:22 +08001607 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001608 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001609 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001610 if (num < 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001611 pr_err("%s: No free domain ids\n", iommu->name);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001612
Jiang Liufb170fb2014-07-11 14:19:28 +08001613 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001614}
1615
Jiang Liu44bde612014-07-11 14:19:29 +08001616static int iommu_attach_vm_domain(struct dmar_domain *domain,
1617 struct intel_iommu *iommu)
1618{
1619 int num;
1620 unsigned long ndomains;
1621
1622 ndomains = cap_ndoms(iommu->cap);
1623 for_each_set_bit(num, iommu->domain_ids, ndomains)
1624 if (iommu->domains[num] == domain)
1625 return num;
1626
1627 return __iommu_attach_domain(domain, iommu);
1628}
1629
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001630static void iommu_detach_domain(struct dmar_domain *domain,
1631 struct intel_iommu *iommu)
1632{
1633 unsigned long flags;
1634 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001635
1636 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001637 if (domain_type_is_vm_or_si(domain)) {
1638 ndomains = cap_ndoms(iommu->cap);
1639 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1640 if (iommu->domains[num] == domain) {
1641 clear_bit(num, iommu->domain_ids);
1642 iommu->domains[num] = NULL;
1643 break;
1644 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001645 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001646 } else {
1647 clear_bit(domain->id, iommu->domain_ids);
1648 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001649 }
Weidong Han8c11e792008-12-08 15:29:22 +08001650 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651}
1652
Jiang Liufb170fb2014-07-11 14:19:28 +08001653static void domain_attach_iommu(struct dmar_domain *domain,
1654 struct intel_iommu *iommu)
1655{
1656 unsigned long flags;
1657
1658 spin_lock_irqsave(&domain->iommu_lock, flags);
1659 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1660 domain->iommu_count++;
1661 if (domain->iommu_count == 1)
1662 domain->nid = iommu->node;
1663 domain_update_iommu_cap(domain);
1664 }
1665 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1666}
1667
1668static int domain_detach_iommu(struct dmar_domain *domain,
1669 struct intel_iommu *iommu)
1670{
1671 unsigned long flags;
1672 int count = INT_MAX;
1673
1674 spin_lock_irqsave(&domain->iommu_lock, flags);
1675 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1676 count = --domain->iommu_count;
1677 domain_update_iommu_cap(domain);
1678 }
1679 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1680
1681 return count;
1682}
1683
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001684static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001685static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686
Joseph Cihula51a63e62011-03-21 11:04:24 -07001687static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001688{
1689 struct pci_dev *pdev = NULL;
1690 struct iova *iova;
1691 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001692
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001693 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1694 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001695
Mark Gross8a443df2008-03-04 14:59:31 -08001696 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1697 &reserved_rbtree_key);
1698
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 /* IOAPIC ranges shouldn't be accessed by DMA */
1700 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1701 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001702 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001703 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001704 return -ENODEV;
1705 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001706
1707 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1708 for_each_pci_dev(pdev) {
1709 struct resource *r;
1710
1711 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1712 r = &pdev->resource[i];
1713 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1714 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001715 iova = reserve_iova(&reserved_iova_list,
1716 IOVA_PFN(r->start),
1717 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001718 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001719 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001720 return -ENODEV;
1721 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722 }
1723 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001724 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725}
1726
1727static void domain_reserve_special_ranges(struct dmar_domain *domain)
1728{
1729 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1730}
1731
1732static inline int guestwidth_to_adjustwidth(int gaw)
1733{
1734 int agaw;
1735 int r = (gaw - 12) % 9;
1736
1737 if (r == 0)
1738 agaw = gaw;
1739 else
1740 agaw = gaw + 9 - r;
1741 if (agaw > 64)
1742 agaw = 64;
1743 return agaw;
1744}
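/*
 * Worked examples (illustrative): gaw = 48 gives r = (48 - 12) % 9 = 0,
 * so agaw = 48; gaw = 40 gives r = 1, so agaw = 40 + 9 - 1 = 48; any
 * result above 64 is clamped to 64.
 */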
1745
1746static int domain_init(struct dmar_domain *domain, int guest_width)
1747{
1748 struct intel_iommu *iommu;
1749 int adjust_width, agaw;
1750 unsigned long sagaw;
1751
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001752 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1753 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754 domain_reserve_special_ranges(domain);
1755
1756 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001757 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001758 if (guest_width > cap_mgaw(iommu->cap))
1759 guest_width = cap_mgaw(iommu->cap);
1760 domain->gaw = guest_width;
1761 adjust_width = guestwidth_to_adjustwidth(guest_width);
1762 agaw = width_to_agaw(adjust_width);
1763 sagaw = cap_sagaw(iommu->cap);
1764 if (!test_bit(agaw, &sagaw)) {
1765 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001766 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767 agaw = find_next_bit(&sagaw, 5, agaw);
1768 if (agaw >= 5)
1769 return -ENODEV;
1770 }
1771 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001772
Weidong Han8e6040972008-12-08 15:49:06 +08001773 if (ecap_coherent(iommu->ecap))
1774 domain->iommu_coherency = 1;
1775 else
1776 domain->iommu_coherency = 0;
1777
Sheng Yang58c610b2009-03-18 15:33:05 +08001778 if (ecap_sc_support(iommu->ecap))
1779 domain->iommu_snooping = 1;
1780 else
1781 domain->iommu_snooping = 0;
1782
David Woodhouse214e39a2014-03-19 10:38:49 +00001783 if (intel_iommu_superpage)
1784 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1785 else
1786 domain->iommu_superpage = 0;
1787
Suresh Siddha4c923d42009-10-02 11:01:24 -07001788 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001789
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001791 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792 if (!domain->pgd)
1793 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001794 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001795 return 0;
1796}
1797
1798static void domain_exit(struct dmar_domain *domain)
1799{
David Woodhouseea8ea462014-03-05 17:09:32 +00001800 struct page *freelist = NULL;
Alex Williamson71684402015-03-04 11:30:10 -07001801 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001802
1803	/* Domain 0 is reserved, so don't process it */
1804 if (!domain)
1805 return;
1806
Alex Williamson7b668352011-05-24 12:02:41 +01001807 /* Flush any lazy unmaps that may reference this domain */
1808 if (!intel_iommu_strict)
1809 flush_unmaps_timeout(0);
1810
Jiang Liu92d03cc2014-02-19 14:07:28 +08001811 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001812 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001813
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001814 /* destroy iovas */
1815 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001816
David Woodhouseea8ea462014-03-05 17:09:32 +00001817 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001818
Jiang Liu92d03cc2014-02-19 14:07:28 +08001819 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001820 rcu_read_lock();
Alex Williamson71684402015-03-04 11:30:10 -07001821 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1822 iommu_detach_domain(domain, g_iommus[i]);
Jiang Liu0e242612014-02-19 14:07:34 +08001823 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001824
David Woodhouseea8ea462014-03-05 17:09:32 +00001825 dma_free_pagelist(freelist);
1826
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 free_domain_mem(domain);
1828}
1829
David Woodhouse64ae8922014-03-09 12:52:30 -07001830static int domain_context_mapping_one(struct dmar_domain *domain,
1831 struct intel_iommu *iommu,
1832 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001833{
1834 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001835 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001836 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001837 int id;
1838 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001839 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840
1841 pr_debug("Set context mapping for %02x:%02x.%d\n",
1842 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001843
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001844 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001845 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1846 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001847
David Woodhouse03ecc322015-02-13 14:35:21 +00001848 spin_lock_irqsave(&iommu->lock, flags);
1849 context = iommu_context_addr(iommu, bus, devfn, 1);
1850 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001851 if (!context)
1852 return -ENOMEM;
1853 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001854 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001855 spin_unlock_irqrestore(&iommu->lock, flags);
1856 return 0;
1857 }
1858
Weidong Hanea6606b2008-12-08 23:08:15 +08001859 id = domain->id;
1860 pgd = domain->pgd;
1861
Jiang Liuab8dfe22014-07-11 14:19:27 +08001862 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001863 if (domain_type_is_vm(domain)) {
1864 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001865 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001866 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001867 pr_err("%s: No free domain ids\n", iommu->name);
Weidong Hanea6606b2008-12-08 23:08:15 +08001868 return -EFAULT;
1869 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001870 }
1871
1872		/* Skip top levels of page tables for
1873		 * an iommu which has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001874 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001875 */
Chris Wright1672af12009-12-02 12:06:34 -08001876 if (translation != CONTEXT_TT_PASS_THROUGH) {
1877 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1878 pgd = phys_to_virt(dma_pte_addr(pgd));
1879 if (!dma_pte_present(pgd)) {
1880 spin_unlock_irqrestore(&iommu->lock, flags);
1881 return -ENOMEM;
1882 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001883 }
1884 }
1885 }
1886
1887 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001888
Yu Zhao93a23a72009-05-18 13:51:37 +08001889 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001890 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001891 translation = info ? CONTEXT_TT_DEV_IOTLB :
1892 CONTEXT_TT_MULTI_LEVEL;
1893 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001894 /*
1895	 * In pass-through mode, AW must be programmed to indicate the largest
1896	 * AGAW value supported by hardware, and ASR is ignored by hardware.
1897 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001898 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001899 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001900 else {
1901 context_set_address_root(context, virt_to_phys(pgd));
1902 context_set_address_width(context, iommu->agaw);
1903 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001904
1905 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001906 context_set_fault_enable(context);
1907 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001908 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001910 /*
1911 * It's a non-present to present mapping. If hardware doesn't cache
1912	 * non-present entries we only need to flush the write-buffer. If it
1913	 * _does_ cache non-present entries, then it does so in the special
1914 * domain #0, which we have to flush:
1915 */
1916 if (cap_caching_mode(iommu->cap)) {
1917 iommu->flush.flush_context(iommu, 0,
1918 (((u16)bus) << 8) | devfn,
1919 DMA_CCMD_MASK_NOBIT,
1920 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001921 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001922 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001923 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001924 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001925 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001926 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001927
Jiang Liufb170fb2014-07-11 14:19:28 +08001928 domain_attach_iommu(domain, iommu);
1929
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930 return 0;
1931}
1932
Alex Williamson579305f2014-07-03 09:51:43 -06001933struct domain_context_mapping_data {
1934 struct dmar_domain *domain;
1935 struct intel_iommu *iommu;
1936 int translation;
1937};
1938
1939static int domain_context_mapping_cb(struct pci_dev *pdev,
1940 u16 alias, void *opaque)
1941{
1942 struct domain_context_mapping_data *data = opaque;
1943
1944 return domain_context_mapping_one(data->domain, data->iommu,
1945 PCI_BUS_NUM(alias), alias & 0xff,
1946 data->translation);
1947}
1948
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001949static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001950domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1951 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001952{
David Woodhouse64ae8922014-03-09 12:52:30 -07001953 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001954 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001955 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001956
David Woodhousee1f167f2014-03-09 15:24:46 -07001957 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001958 if (!iommu)
1959 return -ENODEV;
1960
Alex Williamson579305f2014-07-03 09:51:43 -06001961 if (!dev_is_pci(dev))
1962 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001963 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001964
1965 data.domain = domain;
1966 data.iommu = iommu;
1967 data.translation = translation;
1968
1969 return pci_for_each_dma_alias(to_pci_dev(dev),
1970 &domain_context_mapping_cb, &data);
1971}
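/*
 * Illustrative note (behaviour as understood by the editor, not stated
 * in the original source): for a PCI device behind a PCIe-to-PCI
 * bridge, pci_for_each_dma_alias() also invokes the callback for the
 * bridge's alias, so a context entry is set up for every requester ID
 * the device's transactions may carry on the bus.
 */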
1972
1973static int domain_context_mapped_cb(struct pci_dev *pdev,
1974 u16 alias, void *opaque)
1975{
1976 struct intel_iommu *iommu = opaque;
1977
1978 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979}
1980
David Woodhousee1f167f2014-03-09 15:24:46 -07001981static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001982{
Weidong Han5331fe62008-12-08 23:00:00 +08001983 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001984 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001985
David Woodhousee1f167f2014-03-09 15:24:46 -07001986 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001987 if (!iommu)
1988 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001989
Alex Williamson579305f2014-07-03 09:51:43 -06001990 if (!dev_is_pci(dev))
1991 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001992
Alex Williamson579305f2014-07-03 09:51:43 -06001993 return !pci_for_each_dma_alias(to_pci_dev(dev),
1994 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001995}
1996
Fenghua Yuf5329592009-08-04 15:09:37 -07001997/* Returns the number of VT-d pages, aligned to the MM page size */
1998static inline unsigned long aligned_nrpages(unsigned long host_addr,
1999 size_t size)
2000{
2001 host_addr &= ~PAGE_MASK;
2002 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2003}
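/*
 * Worked example (illustrative, assuming 4KiB VT-d and MM pages):
 * host_addr = 0x1234, size = 0x2000 keeps page offset 0x234, and
 * PAGE_ALIGN(0x234 + 0x2000) >> VTD_PAGE_SHIFT = 0x3000 >> 12 = 3 pages.
 */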
2004
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002005/* Return largest possible superpage level for a given mapping */
2006static inline int hardware_largepage_caps(struct dmar_domain *domain,
2007 unsigned long iov_pfn,
2008 unsigned long phy_pfn,
2009 unsigned long pages)
2010{
2011 int support, level = 1;
2012 unsigned long pfnmerge;
2013
2014 support = domain->iommu_superpage;
2015
2016 /* To use a large page, the virtual *and* physical addresses
2017 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2018 of them will mean we have to use smaller pages. So just
2019 merge them and check both at once. */
2020 pfnmerge = iov_pfn | phy_pfn;
2021
2022 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2023 pages >>= VTD_STRIDE_SHIFT;
2024 if (!pages)
2025 break;
2026 pfnmerge >>= VTD_STRIDE_SHIFT;
2027 level++;
2028 support--;
2029 }
2030 return level;
2031}
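/*
 * Worked example (illustrative values): iov_pfn = 0x200, phy_pfn = 0x400
 * and pages = 0x400 with one level of superpage support: pfnmerge = 0x600
 * has its low 9 bits clear, so one loop iteration leaves pages = 2 and
 * the function returns level 2, i.e. the mapping can use 2MiB superpages.
 */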
2032
David Woodhouse9051aa02009-06-29 12:30:54 +01002033static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2034 struct scatterlist *sg, unsigned long phys_pfn,
2035 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002036{
2037 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002038 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002039 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002040 unsigned int largepage_lvl = 0;
2041 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002042
Jiang Liu162d1b12014-07-11 14:19:35 +08002043 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002044
2045 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2046 return -EINVAL;
2047
2048 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2049
Jiang Liucc4f14a2014-11-26 09:42:10 +08002050 if (!sg) {
2051 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002052 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2053 }
2054
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002055 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002056 uint64_t tmp;
2057
David Woodhousee1605492009-06-29 11:17:38 +01002058 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002059 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002060 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2061 sg->dma_length = sg->length;
2062 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002063 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002064 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002065
David Woodhousee1605492009-06-29 11:17:38 +01002066 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002067 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2068
David Woodhouse5cf0a762014-03-19 16:07:49 +00002069 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002070 if (!pte)
2071 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002072			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002073 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002074 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002075 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2076 /*
2077 * Ensure that old small page tables are
2078 * removed to make room for superpage,
2079 * if they exist.
2080 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002081 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002082 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002083 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002084 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002085 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002086
David Woodhousee1605492009-06-29 11:17:38 +01002087 }
2088		/* We don't need a lock here; nobody else
2089		 * touches the iova range
2090 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002091 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002092 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002093 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002094 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2095 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002096 if (dumps) {
2097 dumps--;
2098 debug_dma_dump_mappings(NULL);
2099 }
2100 WARN_ON(1);
2101 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002102
2103 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2104
2105 BUG_ON(nr_pages < lvl_pages);
2106 BUG_ON(sg_res < lvl_pages);
2107
2108 nr_pages -= lvl_pages;
2109 iov_pfn += lvl_pages;
2110 phys_pfn += lvl_pages;
2111 pteval += lvl_pages * VTD_PAGE_SIZE;
2112 sg_res -= lvl_pages;
2113
2114 /* If the next PTE would be the first in a new page, then we
2115 need to flush the cache on the entries we've just written.
2116 And then we'll need to recalculate 'pte', so clear it and
2117 let it get set again in the if (!pte) block above.
2118
2119 If we're done (!nr_pages) we need to flush the cache too.
2120
2121 Also if we've been setting superpages, we may need to
2122 recalculate 'pte' and switch back to smaller pages for the
2123 end of the mapping, if the trailing size is not enough to
2124 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002125 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002126 if (!nr_pages || first_pte_in_page(pte) ||
2127 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002128 domain_flush_cache(domain, first_pte,
2129 (void *)pte - (void *)first_pte);
2130 pte = NULL;
2131 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002132
2133 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002134 sg = sg_next(sg);
2135 }
2136 return 0;
2137}
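/*
 * Illustrative sg accounting (assuming 4KiB pages, values made up): an
 * sg entry with offset 0x800 and length 0x1800 yields
 * sg_res = aligned_nrpages(0x800, 0x1800) = 2, so two PTEs are filled
 * before the loop above advances to the next scatterlist entry.
 */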
2138
David Woodhouse9051aa02009-06-29 12:30:54 +01002139static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2140 struct scatterlist *sg, unsigned long nr_pages,
2141 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142{
David Woodhouse9051aa02009-06-29 12:30:54 +01002143 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2144}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002145
David Woodhouse9051aa02009-06-29 12:30:54 +01002146static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2147 unsigned long phys_pfn, unsigned long nr_pages,
2148 int prot)
2149{
2150 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151}
2152
Weidong Hanc7151a82008-12-08 22:51:37 +08002153static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002154{
Weidong Hanc7151a82008-12-08 22:51:37 +08002155 if (!iommu)
2156 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002157
2158 clear_context_table(iommu, bus, devfn);
2159 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002160 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002161 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002162}
2163
David Woodhouse109b9b02012-05-25 17:43:02 +01002164static inline void unlink_domain_info(struct device_domain_info *info)
2165{
2166 assert_spin_locked(&device_domain_lock);
2167 list_del(&info->link);
2168 list_del(&info->global);
2169 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002170 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002171}
2172
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002173static void domain_remove_dev_info(struct dmar_domain *domain)
2174{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002175 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002176 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002177
2178 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002179 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002180 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002181 spin_unlock_irqrestore(&device_domain_lock, flags);
2182
Yu Zhao93a23a72009-05-18 13:51:37 +08002183 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002184 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002185
Jiang Liuab8dfe22014-07-11 14:19:27 +08002186 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002187 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002188 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002189 }
2190
2191 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002192 spin_lock_irqsave(&device_domain_lock, flags);
2193 }
2194 spin_unlock_irqrestore(&device_domain_lock, flags);
2195}
2196
2197/*
2198 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002199 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002200 */
David Woodhouse1525a292014-03-06 16:19:30 +00002201static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002202{
2203 struct device_domain_info *info;
2204
2205 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002206 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002207 if (info)
2208 return info->domain;
2209 return NULL;
2210}
2211
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002212static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002213dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2214{
2215 struct device_domain_info *info;
2216
2217 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002218 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002219 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002220 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002221
2222 return NULL;
2223}
2224
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002225static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002226 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002227 struct device *dev,
2228 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002229{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002230 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002231 struct device_domain_info *info;
2232 unsigned long flags;
2233
2234 info = alloc_devinfo_mem();
2235 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002236 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002237
Jiang Liu745f2582014-02-19 14:07:26 +08002238 info->bus = bus;
2239 info->devfn = devfn;
2240 info->dev = dev;
2241 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002242 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002243
2244 spin_lock_irqsave(&device_domain_lock, flags);
2245 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002246 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002247 else {
2248 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002249 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002250 if (info2)
2251 found = info2->domain;
2252 }
Jiang Liu745f2582014-02-19 14:07:26 +08002253 if (found) {
2254 spin_unlock_irqrestore(&device_domain_lock, flags);
2255 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002256 /* Caller must free the original domain */
2257 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002258 }
2259
David Woodhouseb718cd32014-03-09 13:11:33 -07002260 list_add(&info->link, &domain->devices);
2261 list_add(&info->global, &device_domain_list);
2262 if (dev)
2263 dev->archdata.iommu = info;
2264 spin_unlock_irqrestore(&device_domain_lock, flags);
2265
2266 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002267}
2268
Alex Williamson579305f2014-07-03 09:51:43 -06002269static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2270{
2271 *(u16 *)opaque = alias;
2272 return 0;
2273}
2274
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002275/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002276static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002277{
Alex Williamson579305f2014-07-03 09:51:43 -06002278 struct dmar_domain *domain, *tmp;
2279 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002280 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002281 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002282 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002283 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002284
David Woodhouse146922e2014-03-09 15:44:17 -07002285 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002286 if (domain)
2287 return domain;
2288
David Woodhouse146922e2014-03-09 15:44:17 -07002289 iommu = device_to_iommu(dev, &bus, &devfn);
2290 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002291 return NULL;
2292
2293 if (dev_is_pci(dev)) {
2294 struct pci_dev *pdev = to_pci_dev(dev);
2295
2296 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2297
2298 spin_lock_irqsave(&device_domain_lock, flags);
2299 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2300 PCI_BUS_NUM(dma_alias),
2301 dma_alias & 0xff);
2302 if (info) {
2303 iommu = info->iommu;
2304 domain = info->domain;
2305 }
2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2307
2308		/* DMA alias already has a domain, use it */
2309 if (info)
2310 goto found_domain;
2311 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312
David Woodhouse146922e2014-03-09 15:44:17 -07002313 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002314 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002315 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002316 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002317 domain->id = iommu_attach_domain(domain, iommu);
2318 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002319 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002320 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002321 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002322 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002323 if (domain_init(domain, gaw)) {
2324 domain_exit(domain);
2325 return NULL;
2326 }
2327
2328 /* register PCI DMA alias device */
2329 if (dev_is_pci(dev)) {
2330 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2331 dma_alias & 0xff, NULL, domain);
2332
2333 if (!tmp || tmp != domain) {
2334 domain_exit(domain);
2335 domain = tmp;
2336 }
2337
David Woodhouseb718cd32014-03-09 13:11:33 -07002338 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002339 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002340 }
2341
2342found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002343 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2344
2345 if (!tmp || tmp != domain) {
2346 domain_exit(domain);
2347 domain = tmp;
2348 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002349
2350 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002351}
2352
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002353static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002354#define IDENTMAP_ALL 1
2355#define IDENTMAP_GFX 2
2356#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002357
David Woodhouseb2132032009-06-26 18:50:28 +01002358static int iommu_domain_identity_map(struct dmar_domain *domain,
2359 unsigned long long start,
2360 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002361{
David Woodhousec5395d52009-06-28 16:35:56 +01002362 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2363 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002364
David Woodhousec5395d52009-06-28 16:35:56 +01002365 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2366 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002367 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002368 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002369 }
2370
David Woodhousec5395d52009-06-28 16:35:56 +01002371 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2372 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002373 /*
2374	 * The RMRR range might overlap with the physical memory range,
2375	 * so clear it first
2376 */
David Woodhousec5395d52009-06-28 16:35:56 +01002377 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378
David Woodhousec5395d52009-06-28 16:35:56 +01002379 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2380 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002381 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002382}
2383
David Woodhouse0b9d9752014-03-09 15:48:15 -07002384static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002385 unsigned long long start,
2386 unsigned long long end)
2387{
2388 struct dmar_domain *domain;
2389 int ret;
2390
David Woodhouse0b9d9752014-03-09 15:48:15 -07002391 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002392 if (!domain)
2393 return -ENOMEM;
2394
David Woodhouse19943b02009-08-04 16:19:20 +01002395 /* For _hardware_ passthrough, don't bother. But for software
2396 passthrough, we do it anyway -- it may indicate a memory
2397	   range which is reserved in E820, and so didn't get set
2398 up to start with in si_domain */
2399 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002400 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2401 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002402 return 0;
2403 }
2404
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002405 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2406 dev_name(dev), start, end);
2407
David Woodhouse5595b522009-12-02 09:21:55 +00002408 if (end < start) {
2409 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2410 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2411 dmi_get_system_info(DMI_BIOS_VENDOR),
2412 dmi_get_system_info(DMI_BIOS_VERSION),
2413 dmi_get_system_info(DMI_PRODUCT_VERSION));
2414 ret = -EIO;
2415 goto error;
2416 }
2417
David Woodhouse2ff729f2009-08-26 14:25:41 +01002418 if (end >> agaw_to_width(domain->agaw)) {
2419 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2420 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2421 agaw_to_width(domain->agaw),
2422 dmi_get_system_info(DMI_BIOS_VENDOR),
2423 dmi_get_system_info(DMI_BIOS_VERSION),
2424 dmi_get_system_info(DMI_PRODUCT_VERSION));
2425 ret = -EIO;
2426 goto error;
2427 }
David Woodhouse19943b02009-08-04 16:19:20 +01002428
David Woodhouseb2132032009-06-26 18:50:28 +01002429 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002430 if (ret)
2431 goto error;
2432
2433 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002434 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002435 if (ret)
2436 goto error;
2437
2438 return 0;
2439
2440 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002441 domain_exit(domain);
2442 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002443}
2444
2445static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002446 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002447{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002448 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002449 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002450 return iommu_prepare_identity_map(dev, rmrr->base_address,
2451 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002452}
2453
Suresh Siddhad3f13812011-08-23 17:05:25 -07002454#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002455static inline void iommu_prepare_isa(void)
2456{
2457 struct pci_dev *pdev;
2458 int ret;
2459
2460 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2461 if (!pdev)
2462 return;
2463
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002464 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002465 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002466
2467 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002468 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002469
Yijing Wang9b27e822014-05-20 20:37:52 +08002470 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002471}
2472#else
2473static inline void iommu_prepare_isa(void)
2474{
2475 return;
2476}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002477#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002478
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002479static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002480
Matt Kraai071e1372009-08-23 22:30:22 -07002481static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002482{
2483 struct dmar_drhd_unit *drhd;
2484 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002485 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002486 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002487
Jiang Liuab8dfe22014-07-11 14:19:27 +08002488 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002489 if (!si_domain)
2490 return -EFAULT;
2491
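	/*
	 * The static identity domain has to be attached to every IOMMU, and
	 * every unit must hand back the same domain id for it; if any unit
	 * disagrees we cannot describe si_domain consistently and give up.
	 */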
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002492 for_each_active_iommu(iommu, drhd) {
2493 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002494 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495 domain_exit(si_domain);
2496 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002497 } else if (first) {
2498 si_domain->id = ret;
2499 first = false;
2500 } else if (si_domain->id != ret) {
2501 domain_exit(si_domain);
2502 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002503 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002504 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002505 }
2506
2507 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2508 domain_exit(si_domain);
2509 return -EFAULT;
2510 }
2511
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002512 pr_debug("Identity mapping domain is domain %d\n",
Jiang Liu9544c002014-01-06 14:18:13 +08002513 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002514
David Woodhouse19943b02009-08-04 16:19:20 +01002515 if (hw)
2516 return 0;
2517
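	/*
	 * For software identity mapping, pre-populate si_domain with 1:1
	 * translations for every block of usable RAM.  Hardware pass-through
	 * needs no page tables at all, hence the early return above.
	 */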
David Woodhousec7ab48d2009-06-26 19:10:36 +01002518 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002519 unsigned long start_pfn, end_pfn;
2520 int i;
2521
2522 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2523 ret = iommu_domain_identity_map(si_domain,
2524 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2525 if (ret)
2526 return ret;
2527 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002528 }
2529
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002530 return 0;
2531}
2532
David Woodhouse9b226622014-03-09 14:03:28 -07002533static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002534{
2535 struct device_domain_info *info;
2536
2537 if (likely(!iommu_identity_mapping))
2538 return 0;
2539
David Woodhouse9b226622014-03-09 14:03:28 -07002540 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002541 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2542 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002543
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002544 return 0;
2545}
2546
2547static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002548 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002549{
David Woodhouse0ac72662014-03-09 13:19:22 -07002550 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002551 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002552 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002553 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002554
David Woodhouse5913c9b2014-03-09 16:27:31 -07002555 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002556 if (!iommu)
2557 return -ENODEV;
2558
David Woodhouse5913c9b2014-03-09 16:27:31 -07002559 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002560 if (ndomain != domain)
2561 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002562
David Woodhouse5913c9b2014-03-09 16:27:31 -07002563 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002564 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002565 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002566 return ret;
2567 }
2568
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002569 return 0;
2570}
2571
David Woodhouse0b9d9752014-03-09 15:48:15 -07002572static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002573{
2574 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002575 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002576 int i;
2577
Jiang Liu0e242612014-02-19 14:07:34 +08002578 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002579 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002580 /*
2581 * Return TRUE if this RMRR contains the device that
2582 * is passed in.
2583 */
2584 for_each_active_dev_scope(rmrr->devices,
2585 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002586 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002587 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002588 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002589 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002590 }
Jiang Liu0e242612014-02-19 14:07:34 +08002591 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002592 return false;
2593}
2594
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002595/*
2596 * There are a couple of cases where we need to restrict the functionality of
2597 * devices associated with RMRRs. The first is when evaluating a device for
2598 * identity mapping because problems exist when devices are moved in and out
2599 * of domains and their respective RMRR information is lost. This means that
2600 * a device with associated RMRRs will never be in a "passthrough" domain.
2601 * The second is use of the device through the IOMMU API. This interface
2602 * expects to have full control of the IOVA space for the device. We cannot
2603 * satisfy both the requirement that RMRR access is maintained and have an
2604 * unencumbered IOVA space. We also have no ability to quiesce the device's
2605 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2606 * We therefore prevent devices associated with an RMRR from participating in
2607 * the IOMMU API, which eliminates them from device assignment.
2608 *
2609 * In both cases we assume that PCI USB devices with RMRRs have them largely
2610 * for historical reasons and that the RMRR space is not actively used post
2611 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002612 *
2613 * The same exception is made for graphics devices, with the requirement that
2614 * any use of the RMRR regions will be torn down before assigning the device
2615 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002616 */
2617static bool device_is_rmrr_locked(struct device *dev)
2618{
2619 if (!device_has_rmrr(dev))
2620 return false;
2621
2622 if (dev_is_pci(dev)) {
2623 struct pci_dev *pdev = to_pci_dev(dev);
2624
David Woodhouse18436af2015-03-25 15:05:47 +00002625 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002626 return false;
2627 }
2628
2629 return true;
2630}
2631
David Woodhouse3bdb2592014-03-09 16:03:08 -07002632static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002633{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002634
David Woodhouse3bdb2592014-03-09 16:03:08 -07002635 if (dev_is_pci(dev)) {
2636 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002637
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002638 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002639 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002640
David Woodhouse3bdb2592014-03-09 16:03:08 -07002641 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2642 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002643
David Woodhouse3bdb2592014-03-09 16:03:08 -07002644 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2645 return 1;
2646
2647 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2648 return 0;
2649
2650 /*
2651 * We want to start off with all devices in the 1:1 domain, and
2652 * take them out later if we find they can't access all of memory.
2653 *
2654 * However, we can't do this for PCI devices behind bridges,
2655 * because all PCI devices behind the same bridge will end up
2656 * with the same source-id on their transactions.
2657 *
2658 * Practically speaking, we can't change things around for these
2659 * devices at run-time, because we can't be sure there'll be no
2660 * DMA transactions in flight for any of their siblings.
2661 *
2662 * So PCI devices (unless they're on the root bus) as well as
2663 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2664 * the 1:1 domain, just in _case_ one of their siblings turns out
2665 * not to be able to map all of memory.
2666 */
2667 if (!pci_is_pcie(pdev)) {
2668 if (!pci_is_root_bus(pdev->bus))
2669 return 0;
2670 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2671 return 0;
2672 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2673 return 0;
2674 } else {
2675 if (device_has_rmrr(dev))
2676 return 0;
2677 }
David Woodhouse6941af22009-07-04 18:24:27 +01002678
David Woodhouse3dfc8132009-07-04 19:11:08 +01002679 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002680 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002681 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002682 * take them out of the 1:1 domain later.
2683 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002684 if (!startup) {
2685 /*
2686 * If the device's dma_mask is less than the system's memory
2687 * size then this is not a candidate for identity mapping.
2688 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002689 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002690
David Woodhouse3bdb2592014-03-09 16:03:08 -07002691 if (dev->coherent_dma_mask &&
2692 dev->coherent_dma_mask < dma_mask)
2693 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002694
David Woodhouse3bdb2592014-03-09 16:03:08 -07002695 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002696 }
David Woodhouse6941af22009-07-04 18:24:27 +01002697
2698 return 1;
2699}
2700
David Woodhousecf04eee2014-03-21 16:49:04 +00002701static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2702{
2703 int ret;
2704
2705 if (!iommu_should_identity_map(dev, 1))
2706 return 0;
2707
2708 ret = domain_add_dev_info(si_domain, dev,
2709 hw ? CONTEXT_TT_PASS_THROUGH :
2710 CONTEXT_TT_MULTI_LEVEL);
2711 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002712 pr_info("%s identity mapping for device %s\n",
2713 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002714 else if (ret == -ENODEV)
2715 /* device not associated with an iommu */
2716 ret = 0;
2717
2718 return ret;
2719}
2720
2721
Matt Kraai071e1372009-08-23 22:30:22 -07002722static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002723{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002724 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002725 struct dmar_drhd_unit *drhd;
2726 struct intel_iommu *iommu;
2727 struct device *dev;
2728 int i;
2729 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002730
David Woodhouse19943b02009-08-04 16:19:20 +01002731 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002732 if (ret)
2733 return -EFAULT;
2734
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002735 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002736 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2737 if (ret)
2738 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002739 }
2740
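	/*
	 * Devices listed in the DMAR device scopes as ACPI namespace devices
	 * rather than PCI functions are handled here: each physical node
	 * backing such an ACPI device gets the same static identity
	 * treatment as the PCI devices above.
	 */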
David Woodhousecf04eee2014-03-21 16:49:04 +00002741 for_each_active_iommu(iommu, drhd)
2742 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2743 struct acpi_device_physical_node *pn;
2744 struct acpi_device *adev;
2745
2746 if (dev->bus != &acpi_bus_type)
2747 continue;
2748
2749		adev = to_acpi_device(dev);
2750 mutex_lock(&adev->physical_node_lock);
2751 list_for_each_entry(pn, &adev->physical_node_list, node) {
2752 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2753 if (ret)
2754 break;
2755 }
2756 mutex_unlock(&adev->physical_node_lock);
2757 if (ret)
2758 return ret;
2759 }
2760
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002761 return 0;
2762}
2763
Jiang Liuffebeb42014-11-09 22:48:02 +08002764static void intel_iommu_init_qi(struct intel_iommu *iommu)
2765{
2766 /*
2767	 * Start from a sane iommu hardware state.
2768	 * If queued invalidation has already been initialized by us
2769	 * (for example, while enabling interrupt-remapping), then
2770	 * things are already rolling from a sane state.
2771 */
2772 if (!iommu->qi) {
2773 /*
2774 * Clear any previous faults.
2775 */
2776 dmar_fault(-1, iommu);
2777 /*
2778 * Disable queued invalidation if supported and already enabled
2779 * before OS handover.
2780 */
2781 dmar_disable_qi(iommu);
2782 }
2783
2784 if (dmar_enable_qi(iommu)) {
2785 /*
2786 * Queued Invalidate not enabled, use Register Based Invalidate
2787 */
2788 iommu->flush.flush_context = __iommu_flush_context;
2789 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002790 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002791 iommu->name);
2792 } else {
2793 iommu->flush.flush_context = qi_flush_context;
2794 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002795 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002796 }
2797}
2798
Joerg Roedel091d42e2015-06-12 11:56:10 +02002799static int copy_context_table(struct intel_iommu *iommu,
2800 struct root_entry *old_re,
2801 struct context_entry **tbl,
2802 int bus, bool ext)
2803{
2804 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2805 int tbl_idx, pos = 0, idx, devfn, ret = 0;
2806 phys_addr_t old_ce_phys;
2807
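	/*
	 * With the extended root/context format each bus is described by two
	 * context tables (devfns 0x00-0x7f and 0x80-0xff), so both the table
	 * index and the per-entry index below are doubled in the 'ext' case.
	 */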
2808 tbl_idx = ext ? bus * 2 : bus;
2809
2810 for (devfn = 0; devfn < 256; devfn++) {
2811 /* First calculate the correct index */
2812 idx = (ext ? devfn * 2 : devfn) % 256;
2813
2814 if (idx == 0) {
2815 /* First save what we may have and clean up */
2816 if (new_ce) {
2817 tbl[tbl_idx] = new_ce;
2818 __iommu_flush_cache(iommu, new_ce,
2819 VTD_PAGE_SIZE);
2820 pos = 1;
2821 }
2822
2823 if (old_ce)
2824 iounmap(old_ce);
2825
2826 ret = 0;
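			/*
			 * The old root entry carries two context-table
			 * pointers: the lower one (LCTP) covers devfns
			 * 0x00-0x7f, the upper one (UCTP) covers 0x80-0xff.
			 */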
2827 if (devfn < 0x80)
2828 old_ce_phys = root_entry_lctp(old_re);
2829 else
2830 old_ce_phys = root_entry_uctp(old_re);
2831
2832 if (!old_ce_phys) {
2833 if (ext && devfn == 0) {
2834 /* No LCTP, try UCTP */
2835 devfn = 0x7f;
2836 continue;
2837 } else {
2838 goto out;
2839 }
2840 }
2841
2842 ret = -ENOMEM;
2843 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2844 if (!old_ce)
2845 goto out;
2846
2847 new_ce = alloc_pgtable_page(iommu->node);
2848 if (!new_ce)
2849 goto out_unmap;
2850
2851 ret = 0;
2852 }
2853
2854 /* Now copy the context entry */
2855 ce = old_ce[idx];
2856
2857 if (!context_present(&ce))
2858 continue;
2859
2860 new_ce[idx] = ce;
2861 }
2862
2863 tbl[tbl_idx + pos] = new_ce;
2864
2865 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2866
2867out_unmap:
2868 iounmap(old_ce);
2869
2870out:
2871 return ret;
2872}
2873
2874static int copy_translation_tables(struct intel_iommu *iommu)
2875{
2876 struct context_entry **ctxt_tbls;
2877 struct root_entry *old_rt;
2878 phys_addr_t old_rt_phys;
2879 int ctxt_table_entries;
2880 unsigned long flags;
2881 u64 rtaddr_reg;
2882 int bus, ret;
2883 bool ext;
2884
2885 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2886 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
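	/*
	 * If the old root table was in extended format (RTT set) there are
	 * two context tables per bus to copy instead of one.
	 */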
2887
2888 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2889 if (!old_rt_phys)
2890 return -EINVAL;
2891
2892 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2893 if (!old_rt)
2894 return -ENOMEM;
2895
2896 /* This is too big for the stack - allocate it from slab */
2897 ctxt_table_entries = ext ? 512 : 256;
2898 ret = -ENOMEM;
2899 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2900 if (!ctxt_tbls)
2901 goto out_unmap;
2902
2903 for (bus = 0; bus < 256; bus++) {
2904 ret = copy_context_table(iommu, &old_rt[bus],
2905 ctxt_tbls, bus, ext);
2906 if (ret) {
2907 pr_err("%s: Failed to copy context table for bus %d\n",
2908 iommu->name, bus);
2909 continue;
2910 }
2911 }
2912
2913 spin_lock_irqsave(&iommu->lock, flags);
2914
2915 /* Context tables are copied, now write them to the root_entry table */
2916 for (bus = 0; bus < 256; bus++) {
2917 int idx = ext ? bus * 2 : bus;
2918 u64 val;
2919
2920 if (ctxt_tbls[idx]) {
2921 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2922 iommu->root_entry[bus].lo = val;
2923 }
2924
2925 if (!ext || !ctxt_tbls[idx + 1])
2926 continue;
2927
2928 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2929 iommu->root_entry[bus].hi = val;
2930 }
2931
2932 spin_unlock_irqrestore(&iommu->lock, flags);
2933
2934 kfree(ctxt_tbls);
2935
2936 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2937
2938 ret = 0;
2939
2940out_unmap:
2941 iounmap(old_rt);
2942
2943 return ret;
2944}
2945
Joseph Cihulab7792602011-05-03 00:08:37 -07002946static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002947{
2948 struct dmar_drhd_unit *drhd;
2949 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002950 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002951 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002952 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002953
2954 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002955 * for each drhd
2956 * allocate root
2957 * initialize and program root entry to not present
2958 * endfor
2959 */
2960 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002961		/*
2962		 * Lock not needed: this counter is only incremented in the
2963		 * single-threaded kernel __init code path; all other accesses
2964		 * are read-only.
2965		 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002966 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002967 g_num_of_iommus++;
2968 continue;
2969 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002970 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002971 }
2972
Jiang Liuffebeb42014-11-09 22:48:02 +08002973 /* Preallocate enough resources for IOMMU hot-addition */
2974 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2975 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2976
Weidong Hand9630fe2008-12-08 11:06:32 +08002977 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2978 GFP_KERNEL);
2979 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002980 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08002981 ret = -ENOMEM;
2982 goto error;
2983 }
2984
mark gross80b20dd2008-04-18 13:53:58 -07002985 deferred_flush = kzalloc(g_num_of_iommus *
2986 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2987 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002988 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002989 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002990 }
2991
Jiang Liu7c919772014-01-06 14:18:18 +08002992 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002993 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002994
Joerg Roedelb63d80d2015-06-12 09:14:34 +02002995 intel_iommu_init_qi(iommu);
2996
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002997 ret = iommu_init_domains(iommu);
2998 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002999 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003000
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003001 init_translation_status(iommu);
3002
Joerg Roedel091d42e2015-06-12 11:56:10 +02003003 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3004 iommu_disable_translation(iommu);
3005 clear_translation_pre_enabled(iommu);
3006 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3007 iommu->name);
3008 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003009
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003010 /*
3011 * TBD:
3012 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003013		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003014 */
3015 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003016 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003017 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003018
Joerg Roedel091d42e2015-06-12 11:56:10 +02003019 if (translation_pre_enabled(iommu)) {
3020 pr_info("Translation already enabled - trying to copy translation structures\n");
3021
3022 ret = copy_translation_tables(iommu);
3023 if (ret) {
3024 /*
3025 * We found the IOMMU with translation
3026 * enabled - but failed to copy over the
3027 * old root-entry table. Try to proceed
3028 * by disabling translation now and
3029 * allocating a clean root-entry table.
3030 * This might cause DMAR faults, but
3031 * probably the dump will still succeed.
3032 */
3033 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3034 iommu->name);
3035 iommu_disable_translation(iommu);
3036 clear_translation_pre_enabled(iommu);
3037 } else {
3038 pr_info("Copied translation tables from previous kernel for %s\n",
3039 iommu->name);
3040 }
3041 }
3042
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003043 iommu_flush_write_buffer(iommu);
3044 iommu_set_root_entry(iommu);
3045 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3046 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3047
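		/*
		 * Hardware pass-through can only be used if every IOMMU
		 * advertises the capability; a single unit without it
		 * disables hw_pass_through globally.
		 */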
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003048 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003049 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050 }
3051
David Woodhouse19943b02009-08-04 16:19:20 +01003052 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003053 iommu_identity_mapping |= IDENTMAP_ALL;
3054
Suresh Siddhad3f13812011-08-23 17:05:25 -07003055#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003056 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003057#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003058
3059 check_tylersburg_isoch();
3060
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003061 /*
3062	 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003063	 * identity mappings for rmrr, gfx and isa, and may fall back to static
3064	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003065 */
David Woodhouse19943b02009-08-04 16:19:20 +01003066 if (iommu_identity_mapping) {
3067 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3068 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003069 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003070 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003071 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003072 }
David Woodhouse19943b02009-08-04 16:19:20 +01003073 /*
3074 * For each rmrr
3075 * for each dev attached to rmrr
3076 * do
3077 * locate drhd for dev, alloc domain for dev
3078 * allocate free domain
3079 * allocate page table entries for rmrr
3080 * if context not allocated for bus
3081 * allocate and init context
3082 * set present in root table for this bus
3083 * init context with domain, translation etc
3084 * endfor
3085 * endfor
3086 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003087 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003088 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003089		/* some BIOSes list non-existent devices in the DMAR table. */
3090 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003091 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003092 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003093 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003094 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003095 }
3096 }
3097
3098 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003099
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003100 /*
3101 * for each drhd
3102 * enable fault log
3103 * global invalidate context cache
3104 * global invalidate iotlb
3105 * enable translation
3106 */
Jiang Liu7c919772014-01-06 14:18:18 +08003107 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003108 if (drhd->ignored) {
3109 /*
3110 * we always have to disable PMRs or DMA may fail on
3111 * this device
3112 */
3113 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003114 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003115 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003116 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003117
3118 iommu_flush_write_buffer(iommu);
3119
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003120 ret = dmar_set_interrupt(iommu);
3121 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003122 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003123
Jiang Liu2a41cce2014-07-11 14:19:33 +08003124 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003125 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003126 }
3127
3128 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003129
3130free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003131 for_each_active_iommu(iommu, drhd) {
3132 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003133 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003134 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08003135 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08003136free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08003137 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003138error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003139 return ret;
3140}
3141
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003142/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01003143static struct iova *intel_alloc_iova(struct device *dev,
3144 struct dmar_domain *domain,
3145 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003146{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003147 struct iova *iova = NULL;
3148
David Woodhouse875764d2009-06-28 21:20:51 +01003149 /* Restrict dma_mask to the width that the iommu can handle */
3150 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3151
3152 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003153 /*
3154		 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003155		 * DMA_BIT_MASK(32), and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003156		 * from the higher range.
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003157 */
David Woodhouse875764d2009-06-28 21:20:51 +01003158 iova = alloc_iova(&domain->iovad, nrpages,
3159 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3160 if (iova)
3161 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003162 }
David Woodhouse875764d2009-06-28 21:20:51 +01003163 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3164 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003165		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003166 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003167 return NULL;
3168 }
3169
3170 return iova;
3171}
3172
David Woodhoused4b709f2014-03-09 16:07:40 -07003173static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003174{
3175 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003176 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003177
David Woodhoused4b709f2014-03-09 16:07:40 -07003178 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003179 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003180 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003181 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003182 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183 }
3184
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07003186 if (unlikely(!domain_context_mapped(dev))) {
3187 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003188 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003189 pr_err("Domain context map for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003190 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003191 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003192 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003193 }
3194
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003195 return domain;
3196}
3197
David Woodhoused4b709f2014-03-09 16:07:40 -07003198static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003199{
3200 struct device_domain_info *info;
3201
3202 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003203 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003204 if (likely(info))
3205 return info->domain;
3206
3207 return __get_valid_domain_for_dev(dev);
3208}
3209
David Woodhouseecb509e2014-03-09 16:29:55 -07003210/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01003211static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003212{
3213 int found;
3214
David Woodhouse3d891942014-03-06 15:59:26 +00003215 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003216 return 1;
3217
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003218 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003219 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003220
David Woodhouse9b226622014-03-09 14:03:28 -07003221 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003222 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003223 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003224 return 1;
3225 else {
3226 /*
3227			 * The 32 bit DMA device is removed from si_domain and falls
3228			 * back to non-identity mapping.
3229 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003230 domain_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003231 pr_info("32bit %s uses non-identity mapping\n",
3232 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003233 return 0;
3234 }
3235 } else {
3236 /*
3237		 * If a 64 bit DMA device was detached from a VM, the device
3238		 * is put into si_domain for identity mapping.
3239 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003240 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003241 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003242 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003243 hw_pass_through ?
3244 CONTEXT_TT_PASS_THROUGH :
3245 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003246 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003247 pr_info("64bit %s uses identity mapping\n",
3248 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003249 return 1;
3250 }
3251 }
3252 }
3253
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003254 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003255}
3256
David Woodhouse5040a912014-03-09 16:14:00 -07003257static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003258 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003259{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003260 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003261 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003262 struct iova *iova;
3263 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003264 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003265 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003266 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003267
3268 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003269
David Woodhouse5040a912014-03-09 16:14:00 -07003270 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003271 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003272
David Woodhouse5040a912014-03-09 16:14:00 -07003273 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003274 if (!domain)
3275 return 0;
3276
Weidong Han8c11e792008-12-08 15:29:22 +08003277 iommu = domain_get_iommu(domain);
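	/*
	 * Convert the byte range into the number of VT-d pages it spans,
	 * including any unaligned head and tail.
	 */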
David Woodhouse88cb6a72009-06-28 15:03:06 +01003278 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003279
David Woodhouse5040a912014-03-09 16:14:00 -07003280 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003281 if (!iova)
3282 goto error;
3283
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284 /*
3285 * Check if DMAR supports zero-length reads on write only
3286 * mappings..
3287 */
3288 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003289 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003290 prot |= DMA_PTE_READ;
3291 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3292 prot |= DMA_PTE_WRITE;
3293 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003294	 * paddr to (paddr + size) might cover only part of a page; we map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003295	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003296	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003297	 * is not a big problem.
3298 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003299 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003300 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003301 if (ret)
3302 goto error;
3303
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003304 /* it's a non-present to present mapping. Only flush if caching mode */
3305 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003306 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003307 else
Weidong Han8c11e792008-12-08 15:29:22 +08003308 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003309
David Woodhouse03d6a242009-06-28 15:33:46 +01003310 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3311 start_paddr += paddr & ~PAGE_MASK;
3312 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003313
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003315 if (iova)
3316 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003317 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003318 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003319 return 0;
3320}
3321
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003322static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3323 unsigned long offset, size_t size,
3324 enum dma_data_direction dir,
3325 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003326{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003327 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003328 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003329}
3330
mark gross5e0d2a62008-03-04 15:22:08 -08003331static void flush_unmaps(void)
3332{
mark gross80b20dd2008-04-18 13:53:58 -07003333 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003334
mark gross5e0d2a62008-03-04 15:22:08 -08003335 timer_on = 0;
3336
3337 /* just flush them all */
3338 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003339 struct intel_iommu *iommu = g_iommus[i];
3340 if (!iommu)
3341 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003342
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003343 if (!deferred_flush[i].next)
3344 continue;
3345
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003346		/* In caching mode, global flushes make emulation expensive */
3347 if (!cap_caching_mode(iommu->cap))
3348 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003349 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003350 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003351 unsigned long mask;
3352 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003353 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003354
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003355 /* On real hardware multiple invalidations are expensive */
3356 if (cap_caching_mode(iommu->cap))
3357 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003358 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003359 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003360 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003361 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003362 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3363 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3364 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003365 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003366 if (deferred_flush[i].freelist[j])
3367 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003368 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003369 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003370 }
3371
mark gross5e0d2a62008-03-04 15:22:08 -08003372 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003373}
3374
3375static void flush_unmaps_timeout(unsigned long data)
3376{
mark gross80b20dd2008-04-18 13:53:58 -07003377 unsigned long flags;
3378
3379 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003380 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003381 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003382}
3383
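/*
 * Queue an IOVA (plus the freed page-table pages) for a deferred IOTLB flush.
 * Entries are batched per IOMMU and flushed either when HIGH_WATER_MARK
 * entries have accumulated or when the 10ms timer fires, amortising the cost
 * of the invalidations.
 */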
David Woodhouseea8ea462014-03-05 17:09:32 +00003384static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003385{
3386 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003387 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003388 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003389
3390 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003391 if (list_size == HIGH_WATER_MARK)
3392 flush_unmaps();
3393
Weidong Han8c11e792008-12-08 15:29:22 +08003394 iommu = domain_get_iommu(dom);
3395 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003396
mark gross80b20dd2008-04-18 13:53:58 -07003397 next = deferred_flush[iommu_id].next;
3398 deferred_flush[iommu_id].domain[next] = dom;
3399 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003400 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003401 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003402
3403 if (!timer_on) {
3404 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3405 timer_on = 1;
3406 }
3407 list_size++;
3408 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3409}
3410
Jiang Liud41a4ad2014-07-11 14:19:34 +08003411static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003412{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003413 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003414 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003415 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003416 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003417 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003418
David Woodhouse73676832009-07-04 14:08:36 +01003419 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003420 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003421
David Woodhouse1525a292014-03-06 16:19:30 +00003422 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003423 BUG_ON(!domain);
3424
Weidong Han8c11e792008-12-08 15:29:22 +08003425 iommu = domain_get_iommu(domain);
3426
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003427 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003428 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3429 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003430 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003431
David Woodhoused794dc92009-06-28 00:27:49 +01003432 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3433 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003434
David Woodhoused794dc92009-06-28 00:27:49 +01003435 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003436 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003437
David Woodhouseea8ea462014-03-05 17:09:32 +00003438 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003439
mark gross5e0d2a62008-03-04 15:22:08 -08003440 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003441 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003442 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003443 /* free iova */
3444 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003445 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003446 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003447 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003448 /*
3449		 * queue up the release of the unmap to save the 1/6th of the
3450		 * cpu time used up by the iotlb flush operation...
3451 */
mark gross5e0d2a62008-03-04 15:22:08 -08003452 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003453}
3454
Jiang Liud41a4ad2014-07-11 14:19:34 +08003455static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3456 size_t size, enum dma_data_direction dir,
3457 struct dma_attrs *attrs)
3458{
3459 intel_unmap(dev, dev_addr);
3460}
3461
David Woodhouse5040a912014-03-09 16:14:00 -07003462static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003463 dma_addr_t *dma_handle, gfp_t flags,
3464 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003465{
Akinobu Mita36746432014-06-04 16:06:51 -07003466 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003467 int order;
3468
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003469 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003470 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003471
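	/*
	 * With IOMMU translation any physical page can be mapped below the
	 * device's DMA mask, so the zone modifiers are only kept for the
	 * identity-mapped / no-translation case.
	 */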
David Woodhouse5040a912014-03-09 16:14:00 -07003472 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003473 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003474 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3475 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003476 flags |= GFP_DMA;
3477 else
3478 flags |= GFP_DMA32;
3479 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003480
Akinobu Mita36746432014-06-04 16:06:51 -07003481 if (flags & __GFP_WAIT) {
3482 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003483
Akinobu Mita36746432014-06-04 16:06:51 -07003484 page = dma_alloc_from_contiguous(dev, count, order);
3485 if (page && iommu_no_mapping(dev) &&
3486 page_to_phys(page) + size > dev->coherent_dma_mask) {
3487 dma_release_from_contiguous(dev, page, count);
3488 page = NULL;
3489 }
3490 }
3491
3492 if (!page)
3493 page = alloc_pages(flags, order);
3494 if (!page)
3495 return NULL;
3496 memset(page_address(page), 0, size);
3497
3498 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003499 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003500 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003501 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003502 return page_address(page);
3503 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3504 __free_pages(page, order);
3505
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003506 return NULL;
3507}
3508
David Woodhouse5040a912014-03-09 16:14:00 -07003509static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003510 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003511{
3512 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003513 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003515 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003516 order = get_order(size);
3517
Jiang Liud41a4ad2014-07-11 14:19:34 +08003518 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003519 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3520 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003521}
3522
David Woodhouse5040a912014-03-09 16:14:00 -07003523static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003524 int nelems, enum dma_data_direction dir,
3525 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003526{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003527 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003528}
3529
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003530static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003531 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003532{
3533 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003534 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003535
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003536 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003537 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003538 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003539 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003540 }
3541 return nelems;
3542}
3543
David Woodhouse5040a912014-03-09 16:14:00 -07003544static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003545 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003546{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003547 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003548 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003549 size_t size = 0;
3550 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003551 struct iova *iova = NULL;
3552 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003553 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003554 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003555 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003556
3557 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003558 if (iommu_no_mapping(dev))
3559 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003560
David Woodhouse5040a912014-03-09 16:14:00 -07003561 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003562 if (!domain)
3563 return 0;
3564
Weidong Han8c11e792008-12-08 15:29:22 +08003565 iommu = domain_get_iommu(domain);
3566
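	/*
	 * Add up the VT-d pages needed for the whole scatterlist and carve a
	 * single contiguous IOVA range for it; the elements are then mapped
	 * back to back inside that range by domain_sg_mapping().
	 */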
David Woodhouseb536d242009-06-28 14:49:31 +01003567 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003568 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003569
David Woodhouse5040a912014-03-09 16:14:00 -07003570 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3571 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003572 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003573 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003574 return 0;
3575 }
3576
3577 /*
3578 * Check if DMAR supports zero-length reads on write only
3579 * mappings..
3580 */
3581 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003582 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003583 prot |= DMA_PTE_READ;
3584 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3585 prot |= DMA_PTE_WRITE;
3586
David Woodhouseb536d242009-06-28 14:49:31 +01003587 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003588
Fenghua Yuf5329592009-08-04 15:09:37 -07003589 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003590 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003591 dma_pte_free_pagetable(domain, start_vpfn,
3592 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003593 __free_iova(&domain->iovad, iova);
3594 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003595 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003596
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003597 /* it's a non-present to present mapping. Only flush if caching mode */
3598 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003599 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003600 else
Weidong Han8c11e792008-12-08 15:29:22 +08003601 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003602
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003603 return nelems;
3604}
3605
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003606static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3607{
3608 return !dma_addr;
3609}
3610
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003611struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003612 .alloc = intel_alloc_coherent,
3613 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003614 .map_sg = intel_map_sg,
3615 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003616 .map_page = intel_map_page,
3617 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003618 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003619};
3620
3621static inline int iommu_domain_cache_init(void)
3622{
3623 int ret = 0;
3624
3625 iommu_domain_cache = kmem_cache_create("iommu_domain",
3626 sizeof(struct dmar_domain),
3627 0,
3628 SLAB_HWCACHE_ALIGN,
3629
3630 NULL);
3631 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003632 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003633 ret = -ENOMEM;
3634 }
3635
3636 return ret;
3637}
3638
3639static inline int iommu_devinfo_cache_init(void)
3640{
3641 int ret = 0;
3642
3643 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3644 sizeof(struct device_domain_info),
3645 0,
3646 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003647 NULL);
3648 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003649 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003650 ret = -ENOMEM;
3651 }
3652
3653 return ret;
3654}
3655
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003656static int __init iommu_init_mempool(void)
3657{
3658 int ret;
3659 ret = iommu_iova_cache_init();
3660 if (ret)
3661 return ret;
3662
3663 ret = iommu_domain_cache_init();
3664 if (ret)
3665 goto domain_error;
3666
3667 ret = iommu_devinfo_cache_init();
3668 if (!ret)
3669 return ret;
3670
3671 kmem_cache_destroy(iommu_domain_cache);
3672domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003673 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003674
3675 return -ENOMEM;
3676}
3677
3678static void __init iommu_exit_mempool(void)
3679{
3680 kmem_cache_destroy(iommu_devinfo_cache);
3681 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003682 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003683}
3684
Dan Williams556ab452010-07-23 15:47:56 -07003685static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3686{
3687 struct dmar_drhd_unit *drhd;
3688 u32 vtbar;
3689 int rc;
3690
3691 /* We know that this device on this chipset has its own IOMMU.
3692 * If we find it under a different IOMMU, then the BIOS is lying
3693 * to us. Hope that the IOMMU for this device is actually
3694 * disabled, and it needs no translation...
3695 */
3696 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3697 if (rc) {
3698 /* "can't" happen */
3699 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3700 return;
3701 }
3702 vtbar &= 0xffff0000;
3703
3704	/* we know that this iommu should be at offset 0xa000 from vtbar */
3705 drhd = dmar_find_matched_drhd_unit(pdev);
3706 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3707 TAINT_FIRMWARE_WORKAROUND,
3708 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3709 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3710}
3711DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3712
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003713static void __init init_no_remapping_devices(void)
3714{
3715 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003716 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003717 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003718
3719 for_each_drhd_unit(drhd) {
3720 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003721 for_each_active_dev_scope(drhd->devices,
3722 drhd->devices_cnt, i, dev)
3723 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003724 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003725 if (i == drhd->devices_cnt)
3726 drhd->ignored = 1;
3727 }
3728 }
3729
Jiang Liu7c919772014-01-06 14:18:18 +08003730 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003731 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003732 continue;
3733
Jiang Liub683b232014-02-19 14:07:32 +08003734 for_each_active_dev_scope(drhd->devices,
3735 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003736 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003737 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003738 if (i < drhd->devices_cnt)
3739 continue;
3740
David Woodhousec0771df2011-10-14 20:59:46 +01003741 /* This IOMMU has *only* gfx devices. Either bypass it or
3742 set the gfx_mapped flag, as appropriate */
3743 if (dmar_map_gfx) {
3744 intel_iommu_gfx_mapped = 1;
3745 } else {
3746 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003747 for_each_active_dev_scope(drhd->devices,
3748 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003749 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003750 }
3751 }
3752}
3753
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003754#ifdef CONFIG_SUSPEND
3755static int init_iommu_hw(void)
3756{
3757 struct dmar_drhd_unit *drhd;
3758 struct intel_iommu *iommu = NULL;
3759
3760 for_each_active_iommu(iommu, drhd)
3761 if (iommu->qi)
3762 dmar_reenable_qi(iommu);
3763
Joseph Cihulab7792602011-05-03 00:08:37 -07003764 for_each_iommu(iommu, drhd) {
3765 if (drhd->ignored) {
3766 /*
3767 * we always have to disable PMRs or DMA may fail on
3768 * this device
3769 */
3770 if (force_on)
3771 iommu_disable_protect_mem_regions(iommu);
3772 continue;
3773 }
3774
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003775 iommu_flush_write_buffer(iommu);
3776
3777 iommu_set_root_entry(iommu);
3778
3779 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003780 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003781 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3782 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003783 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003784 }
3785
3786 return 0;
3787}
3788
3789static void iommu_flush_all(void)
3790{
3791 struct dmar_drhd_unit *drhd;
3792 struct intel_iommu *iommu;
3793
3794 for_each_active_iommu(iommu, drhd) {
3795 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003796 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003797 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003798 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003799 }
3800}
3801
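/*
 * Flush all caches, save each active IOMMU's fault-event registers and
 * disable translation in preparation for system suspend.
 */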
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003802static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003803{
3804 struct dmar_drhd_unit *drhd;
3805 struct intel_iommu *iommu = NULL;
3806 unsigned long flag;
3807
3808 for_each_active_iommu(iommu, drhd) {
3809 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3810 GFP_ATOMIC);
3811 if (!iommu->iommu_state)
3812 goto nomem;
3813 }
3814
3815 iommu_flush_all();
3816
3817 for_each_active_iommu(iommu, drhd) {
3818 iommu_disable_translation(iommu);
3819
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003820 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003821
3822 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3823 readl(iommu->reg + DMAR_FECTL_REG);
3824 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3825 readl(iommu->reg + DMAR_FEDATA_REG);
3826 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3827 readl(iommu->reg + DMAR_FEADDR_REG);
3828 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3829 readl(iommu->reg + DMAR_FEUADDR_REG);
3830
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003831 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003832 }
3833 return 0;
3834
3835nomem:
3836 for_each_active_iommu(iommu, drhd)
3837 kfree(iommu->iommu_state);
3838
3839 return -ENOMEM;
3840}
3841
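/*
 * Re-initialize the IOMMU hardware via init_iommu_hw() and restore the
 * fault-event registers saved by iommu_suspend().
 */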
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003842static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003843{
3844 struct dmar_drhd_unit *drhd;
3845 struct intel_iommu *iommu = NULL;
3846 unsigned long flag;
3847
3848 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003849 if (force_on)
3850 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3851 else
3852 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003853 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003854 }
3855
3856 for_each_active_iommu(iommu, drhd) {
3857
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003858 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003859
3860 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3861 iommu->reg + DMAR_FECTL_REG);
3862 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3863 iommu->reg + DMAR_FEDATA_REG);
3864 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3865 iommu->reg + DMAR_FEADDR_REG);
3866 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3867 iommu->reg + DMAR_FEUADDR_REG);
3868
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003869 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003870 }
3871
3872 for_each_active_iommu(iommu, drhd)
3873 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003874}
3875
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003876static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003877 .resume = iommu_resume,
3878 .suspend = iommu_suspend,
3879};
3880
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003881static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003882{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003883 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003884}
3885
3886#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003887static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003888#endif /* CONFIG_SUSPEND */
3889
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003890
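/*
 * Parse an ACPI RMRR (Reserved Memory Region Reporting) structure into
 * a dmar_rmrr_unit and add it to the dmar_rmrr_units list.
 */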
Jiang Liuc2a0b532014-11-09 22:47:56 +08003891int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003892{
3893 struct acpi_dmar_reserved_memory *rmrr;
3894 struct dmar_rmrr_unit *rmrru;
3895
3896 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3897 if (!rmrru)
3898 return -ENOMEM;
3899
3900 rmrru->hdr = header;
3901 rmrr = (struct acpi_dmar_reserved_memory *)header;
3902 rmrru->base_address = rmrr->base_address;
3903 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003904 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3905 ((void *)rmrr) + rmrr->header.length,
3906 &rmrru->devices_cnt);
3907 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3908 kfree(rmrru);
3909 return -ENOMEM;
3910 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003911
Jiang Liu2e455282014-02-19 14:07:36 +08003912 list_add(&rmrru->list, &dmar_rmrr_units);
3913
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003914 return 0;
3915}
3916
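/*
 * Look up an already-registered ATSR unit with the same segment,
 * length and contents as @atsr.
 */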
Jiang Liu6b197242014-11-09 22:47:58 +08003917static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3918{
3919 struct dmar_atsr_unit *atsru;
3920 struct acpi_dmar_atsr *tmp;
3921
3922 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3923 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3924 if (atsr->segment != tmp->segment)
3925 continue;
3926 if (atsr->header.length != tmp->header.length)
3927 continue;
3928 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3929 return atsru;
3930 }
3931
3932 return NULL;
3933}
3934
3935int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003936{
3937 struct acpi_dmar_atsr *atsr;
3938 struct dmar_atsr_unit *atsru;
3939
Jiang Liu6b197242014-11-09 22:47:58 +08003940 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3941 return 0;
3942
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003943 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003944 atsru = dmar_find_atsr(atsr);
3945 if (atsru)
3946 return 0;
3947
3948 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003949 if (!atsru)
3950 return -ENOMEM;
3951
Jiang Liu6b197242014-11-09 22:47:58 +08003952 /*
3953 * If memory is allocated from slab by ACPI _DSM method, we need to
3954 * copy the memory content because the memory buffer will be freed
3955 * on return.
3956 */
3957 atsru->hdr = (void *)(atsru + 1);
3958 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003959 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003960 if (!atsru->include_all) {
3961 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3962 (void *)atsr + atsr->header.length,
3963 &atsru->devices_cnt);
3964 if (atsru->devices_cnt && atsru->devices == NULL) {
3965 kfree(atsru);
3966 return -ENOMEM;
3967 }
3968 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003969
Jiang Liu0e242612014-02-19 14:07:34 +08003970 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003971
3972 return 0;
3973}
3974
Jiang Liu9bdc5312014-01-06 14:18:27 +08003975static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3976{
3977 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3978 kfree(atsru);
3979}
3980
Jiang Liu6b197242014-11-09 22:47:58 +08003981int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3982{
3983 struct acpi_dmar_atsr *atsr;
3984 struct dmar_atsr_unit *atsru;
3985
3986 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3987 atsru = dmar_find_atsr(atsr);
3988 if (atsru) {
3989 list_del_rcu(&atsru->list);
3990 synchronize_rcu();
3991 intel_iommu_free_atsr(atsru);
3992 }
3993
3994 return 0;
3995}
3996
3997int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3998{
3999 int i;
4000 struct device *dev;
4001 struct acpi_dmar_atsr *atsr;
4002 struct dmar_atsr_unit *atsru;
4003
4004 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4005 atsru = dmar_find_atsr(atsr);
4006 if (!atsru)
4007 return 0;
4008
4009 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4010 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4011 i, dev)
4012 return -EBUSY;
4013
4014 return 0;
4015}
4016
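/*
 * Bring a hot-added DMAR unit online: reject it if it lacks features
 * the running configuration depends on (pass-through, snooping,
 * superpages), then allocate its domains and root entry and enable
 * queued invalidation, the fault interrupt and translation.
 */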
Jiang Liuffebeb42014-11-09 22:48:02 +08004017static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4018{
4019 int sp, ret = 0;
4020 struct intel_iommu *iommu = dmaru->iommu;
4021
4022 if (g_iommus[iommu->seq_id])
4023 return 0;
4024
4025 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004026 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004027 iommu->name);
4028 return -ENXIO;
4029 }
4030 if (!ecap_sc_support(iommu->ecap) &&
4031 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004032 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004033 iommu->name);
4034 return -ENXIO;
4035 }
4036 sp = domain_update_iommu_superpage(iommu) - 1;
4037 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004038 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004039 iommu->name);
4040 return -ENXIO;
4041 }
4042
4043 /*
4044 * Disable translation if already enabled prior to OS handover.
4045 */
4046 if (iommu->gcmd & DMA_GCMD_TE)
4047 iommu_disable_translation(iommu);
4048
4049 g_iommus[iommu->seq_id] = iommu;
4050 ret = iommu_init_domains(iommu);
4051 if (ret == 0)
4052 ret = iommu_alloc_root_entry(iommu);
4053 if (ret)
4054 goto out;
4055
4056 if (dmaru->ignored) {
4057 /*
4058 * we always have to disable PMRs or DMA may fail on this device
4059 */
4060 if (force_on)
4061 iommu_disable_protect_mem_regions(iommu);
4062 return 0;
4063 }
4064
4065 intel_iommu_init_qi(iommu);
4066 iommu_flush_write_buffer(iommu);
4067 ret = dmar_set_interrupt(iommu);
4068 if (ret)
4069 goto disable_iommu;
4070
4071 iommu_set_root_entry(iommu);
4072 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4073 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4074 iommu_enable_translation(iommu);
4075
4076 if (si_domain) {
4077 ret = iommu_attach_domain(si_domain, iommu);
4078 if (ret < 0 || si_domain->id != ret)
4079 goto disable_iommu;
4080 domain_attach_iommu(si_domain, iommu);
4081 }
4082
4083 iommu_disable_protect_mem_regions(iommu);
4084 return 0;
4085
4086disable_iommu:
4087 disable_dmar_iommu(iommu);
4088out:
4089 free_dmar_iommu(iommu);
4090 return ret;
4091}
4092
Jiang Liu6b197242014-11-09 22:47:58 +08004093int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4094{
Jiang Liuffebeb42014-11-09 22:48:02 +08004095 int ret = 0;
4096 struct intel_iommu *iommu = dmaru->iommu;
4097
4098 if (!intel_iommu_enabled)
4099 return 0;
4100 if (iommu == NULL)
4101 return -EINVAL;
4102
4103 if (insert) {
4104 ret = intel_iommu_add(dmaru);
4105 } else {
4106 disable_dmar_iommu(iommu);
4107 free_dmar_iommu(iommu);
4108 }
4109
4110 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004111}
4112
Jiang Liu9bdc5312014-01-06 14:18:27 +08004113static void intel_iommu_free_dmars(void)
4114{
4115 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4116 struct dmar_atsr_unit *atsru, *atsr_n;
4117
4118 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4119 list_del(&rmrru->list);
4120 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4121 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004122 }
4123
Jiang Liu9bdc5312014-01-06 14:18:27 +08004124 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4125 list_del(&atsru->list);
4126 intel_iommu_free_atsr(atsru);
4127 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004128}
4129
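/*
 * Walk up from @dev to its PCIe root port and return non-zero if an
 * ATSR unit on the same segment covers that port, either explicitly in
 * its device scope or via its include_all flag.
 */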
4130int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4131{
Jiang Liub683b232014-02-19 14:07:32 +08004132 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004133 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004134 struct pci_dev *bridge = NULL;
4135 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004136 struct acpi_dmar_atsr *atsr;
4137 struct dmar_atsr_unit *atsru;
4138
4139 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004140 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004141 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004142 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004143 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004144 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004145 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004146 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004147 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08004148 if (!bridge)
4149 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004150
Jiang Liu0e242612014-02-19 14:07:34 +08004151 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004152 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4153 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4154 if (atsr->segment != pci_domain_nr(dev->bus))
4155 continue;
4156
Jiang Liub683b232014-02-19 14:07:32 +08004157 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004158 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004159 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004160
4161 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004162 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004163 }
Jiang Liub683b232014-02-19 14:07:32 +08004164 ret = 0;
4165out:
Jiang Liu0e242612014-02-19 14:07:34 +08004166 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004167
Jiang Liub683b232014-02-19 14:07:32 +08004168 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004169}
4170
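/*
 * Keep the device scopes of the parsed RMRR and ATSR units in sync as
 * PCI devices are added to or removed from the system.
 */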
Jiang Liu59ce0512014-02-19 14:07:35 +08004171int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4172{
4173 int ret = 0;
4174 struct dmar_rmrr_unit *rmrru;
4175 struct dmar_atsr_unit *atsru;
4176 struct acpi_dmar_atsr *atsr;
4177 struct acpi_dmar_reserved_memory *rmrr;
4178
4179 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4180 return 0;
4181
4182 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4183 rmrr = container_of(rmrru->hdr,
4184 struct acpi_dmar_reserved_memory, header);
4185 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4186 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4187 ((void *)rmrr) + rmrr->header.length,
4188 rmrr->segment, rmrru->devices,
4189 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004190 if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004191 return ret;
4192 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004193 dmar_remove_dev_scope(info, rmrr->segment,
4194 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004195 }
4196 }
4197
4198 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4199 if (atsru->include_all)
4200 continue;
4201
4202 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4203 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4204 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4205 (void *)atsr + atsr->header.length,
4206 atsr->segment, atsru->devices,
4207 atsru->devices_cnt);
4208 if (ret > 0)
4209 break;
 4210 else if (ret < 0)
4211 return ret;
4212 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4213 if (dmar_remove_dev_scope(info, atsr->segment,
4214 atsru->devices, atsru->devices_cnt))
4215 break;
4216 }
4217 }
4218
4219 return 0;
4220}
4221
Fenghua Yu99dcade2009-11-11 07:23:06 -08004222/*
 4223 * Here we only respond to a device being unbound from its driver.
 4224 *
 4225 * A newly added device is not attached to its DMAR domain here yet; that
 4226 * happens when the device is first mapped to an iova.
4227 */
4228static int device_notifier(struct notifier_block *nb,
4229 unsigned long action, void *data)
4230{
4231 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004232 struct dmar_domain *domain;
4233
David Woodhouse3d891942014-03-06 15:59:26 +00004234 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004235 return 0;
4236
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004237 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004238 return 0;
4239
David Woodhouse1525a292014-03-06 16:19:30 +00004240 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004241 if (!domain)
4242 return 0;
4243
Jiang Liu3a5670e2014-02-19 14:07:33 +08004244 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004245 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004246 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004247 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004248 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004249
Fenghua Yu99dcade2009-11-11 07:23:06 -08004250 return 0;
4251}
4252
4253static struct notifier_block device_nb = {
4254 .notifier_call = device_notifier,
4255};
4256
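/*
 * Memory hotplug notifier: extend the static identity (si) domain when
 * new memory goes online, and unmap and flush the range again when it
 * is taken offline.
 */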
Jiang Liu75f05562014-02-19 14:07:37 +08004257static int intel_iommu_memory_notifier(struct notifier_block *nb,
4258 unsigned long val, void *v)
4259{
4260 struct memory_notify *mhp = v;
4261 unsigned long long start, end;
4262 unsigned long start_vpfn, last_vpfn;
4263
4264 switch (val) {
4265 case MEM_GOING_ONLINE:
4266 start = mhp->start_pfn << PAGE_SHIFT;
4267 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4268 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004269 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004270 start, end);
4271 return NOTIFY_BAD;
4272 }
4273 break;
4274
4275 case MEM_OFFLINE:
4276 case MEM_CANCEL_ONLINE:
4277 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4278 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4279 while (start_vpfn <= last_vpfn) {
4280 struct iova *iova;
4281 struct dmar_drhd_unit *drhd;
4282 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004283 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004284
4285 iova = find_iova(&si_domain->iovad, start_vpfn);
4286 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004287 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004288 start_vpfn);
4289 break;
4290 }
4291
4292 iova = split_and_remove_iova(&si_domain->iovad, iova,
4293 start_vpfn, last_vpfn);
4294 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004295 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004296 start_vpfn, last_vpfn);
4297 return NOTIFY_BAD;
4298 }
4299
David Woodhouseea8ea462014-03-05 17:09:32 +00004300 freelist = domain_unmap(si_domain, iova->pfn_lo,
4301 iova->pfn_hi);
4302
Jiang Liu75f05562014-02-19 14:07:37 +08004303 rcu_read_lock();
4304 for_each_active_iommu(iommu, drhd)
4305 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004306 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004307 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004308 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004309 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004310
4311 start_vpfn = iova->pfn_hi + 1;
4312 free_iova_mem(iova);
4313 }
4314 break;
4315 }
4316
4317 return NOTIFY_OK;
4318}
4319
4320static struct notifier_block intel_iommu_memory_nb = {
4321 .notifier_call = intel_iommu_memory_notifier,
4322 .priority = 0
4323};
4324
Alex Williamsona5459cf2014-06-12 16:12:31 -06004325
4326static ssize_t intel_iommu_show_version(struct device *dev,
4327 struct device_attribute *attr,
4328 char *buf)
4329{
4330 struct intel_iommu *iommu = dev_get_drvdata(dev);
4331 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4332 return sprintf(buf, "%d:%d\n",
4333 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4334}
4335static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4336
4337static ssize_t intel_iommu_show_address(struct device *dev,
4338 struct device_attribute *attr,
4339 char *buf)
4340{
4341 struct intel_iommu *iommu = dev_get_drvdata(dev);
4342 return sprintf(buf, "%llx\n", iommu->reg_phys);
4343}
4344static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4345
4346static ssize_t intel_iommu_show_cap(struct device *dev,
4347 struct device_attribute *attr,
4348 char *buf)
4349{
4350 struct intel_iommu *iommu = dev_get_drvdata(dev);
4351 return sprintf(buf, "%llx\n", iommu->cap);
4352}
4353static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4354
4355static ssize_t intel_iommu_show_ecap(struct device *dev,
4356 struct device_attribute *attr,
4357 char *buf)
4358{
4359 struct intel_iommu *iommu = dev_get_drvdata(dev);
4360 return sprintf(buf, "%llx\n", iommu->ecap);
4361}
4362static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4363
4364static struct attribute *intel_iommu_attrs[] = {
4365 &dev_attr_version.attr,
4366 &dev_attr_address.attr,
4367 &dev_attr_cap.attr,
4368 &dev_attr_ecap.attr,
4369 NULL,
4370};
4371
4372static struct attribute_group intel_iommu_group = {
4373 .name = "intel-iommu",
4374 .attrs = intel_iommu_attrs,
4375};
4376
4377const struct attribute_group *intel_iommu_groups[] = {
4378 &intel_iommu_group,
4379 NULL,
4380};
4381
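/*
 * Main initialization entry point: parse the DMAR table, set up the
 * IOMMUs and DMA ops, and register the IOMMU API, power-management and
 * bus/memory notifier hooks.
 */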
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004382int __init intel_iommu_init(void)
4383{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004384 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004385 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004386 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004387
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004388 /* VT-d is required for a TXT/tboot launch, so enforce that */
4389 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004390
Jiang Liu3a5670e2014-02-19 14:07:33 +08004391 if (iommu_init_mempool()) {
4392 if (force_on)
4393 panic("tboot: Failed to initialize iommu memory\n");
4394 return -ENOMEM;
4395 }
4396
4397 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004398 if (dmar_table_init()) {
4399 if (force_on)
4400 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004401 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004402 }
4403
Takao Indoh3a93c842013-04-23 17:35:03 +09004404 /*
4405 * Disable translation if already enabled prior to OS handover.
4406 */
Jiang Liu7c919772014-01-06 14:18:18 +08004407 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004408 if (iommu->gcmd & DMA_GCMD_TE)
4409 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004410
Suresh Siddhac2c72862011-08-23 17:05:19 -07004411 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004412 if (force_on)
4413 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004414 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004415 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004416
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004417 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004418 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004419
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004420 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004421 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004422
4423 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004424 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004425
Joseph Cihula51a63e62011-03-21 11:04:24 -07004426 if (dmar_init_reserved_ranges()) {
4427 if (force_on)
4428 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004429 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004430 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004431
4432 init_no_remapping_devices();
4433
Joseph Cihulab7792602011-05-03 00:08:37 -07004434 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004435 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004436 if (force_on)
4437 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004438 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004439 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004440 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004441 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004442 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004443
mark gross5e0d2a62008-03-04 15:22:08 -08004444 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004445#ifdef CONFIG_SWIOTLB
4446 swiotlb = 0;
4447#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004448 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004449
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004450 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004451
Alex Williamsona5459cf2014-06-12 16:12:31 -06004452 for_each_active_iommu(iommu, drhd)
4453 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4454 intel_iommu_groups,
4455 iommu->name);
4456
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004457 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004458 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004459 if (si_domain && !hw_pass_through)
4460 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004461
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004462 intel_iommu_enabled = 1;
4463
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004464 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004465
4466out_free_reserved_range:
4467 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004468out_free_dmar:
4469 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004470 up_write(&dmar_global_lock);
4471 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004472 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004473}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004474
Alex Williamson579305f2014-07-03 09:51:43 -06004475static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4476{
4477 struct intel_iommu *iommu = opaque;
4478
4479 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4480 return 0;
4481}
4482
4483/*
4484 * NB - intel-iommu lacks any sort of reference counting for the users of
4485 * dependent devices. If multiple endpoints have intersecting dependent
4486 * devices, unbinding the driver from any one of them will possibly leave
4487 * the others unable to operate.
4488 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004489static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004490 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004491{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004492 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004493 return;
4494
Alex Williamson579305f2014-07-03 09:51:43 -06004495 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004496}
4497
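/*
 * Detach @dev from @domain.  If no other device in the domain sits
 * behind the same IOMMU, that IOMMU is detached from the domain too.
 */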
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004498static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004499 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004500{
Yijing Wangbca2b912013-10-31 17:26:04 +08004501 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004502 struct intel_iommu *iommu;
4503 unsigned long flags;
Quentin Lambert2f119c72015-02-06 10:59:53 +01004504 bool found = false;
David Woodhouse156baca2014-03-09 14:00:57 -07004505 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004506
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004507 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004508 if (!iommu)
4509 return;
4510
4511 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004512 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004513 if (info->iommu == iommu && info->bus == bus &&
4514 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004515 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004516 spin_unlock_irqrestore(&device_domain_lock, flags);
4517
Yu Zhao93a23a72009-05-18 13:51:37 +08004518 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004519 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004520 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004521 free_devinfo_mem(info);
4522
4523 spin_lock_irqsave(&device_domain_lock, flags);
4524
4525 if (found)
4526 break;
4527 else
4528 continue;
4529 }
4530
 4531		/* if there are no other devices under the same iommu
 4532		 * owned by this domain, clear this iommu in iommu_bmp and
 4533		 * update the iommu count and coherency
4534 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004535 if (info->iommu == iommu)
Quentin Lambert2f119c72015-02-06 10:59:53 +01004536 found = true;
Weidong Hanc7151a82008-12-08 22:51:37 +08004537 }
4538
Roland Dreier3e7abe22011-07-20 06:22:21 -07004539 spin_unlock_irqrestore(&device_domain_lock, flags);
4540
Weidong Hanc7151a82008-12-08 22:51:37 +08004541 if (!found) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004542 domain_detach_iommu(domain, iommu);
4543 if (!domain_type_is_vm_or_si(domain))
4544 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004545 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004546}
4547
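/*
 * Initialize a domain created through the IOMMU API: set up its iova
 * domain, reserved ranges, address widths and top-level page table.
 */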
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004548static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004549{
4550 int adjust_width;
4551
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004552 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4553 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004554 domain_reserve_special_ranges(domain);
4555
4556 /* calculate AGAW */
4557 domain->gaw = guest_width;
4558 adjust_width = guestwidth_to_adjustwidth(guest_width);
4559 domain->agaw = width_to_agaw(adjust_width);
4560
Weidong Han5e98c4b2008-12-08 23:03:27 +08004561 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004562 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004563 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004564 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004565
4566 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004567 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004568 if (!domain->pgd)
4569 return -ENOMEM;
4570 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4571 return 0;
4572}
4573
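/*
 * IOMMU API: allocate an unmanaged domain backed by a virtual-machine
 * dmar_domain and set up its aperture geometry.
 */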
Joerg Roedel00a77de2015-03-26 13:43:08 +01004574static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004575{
Joerg Roedel5d450802008-12-03 14:52:32 +01004576 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004577 struct iommu_domain *domain;
4578
4579 if (type != IOMMU_DOMAIN_UNMANAGED)
4580 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004581
Jiang Liuab8dfe22014-07-11 14:19:27 +08004582 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004583 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004584 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004585 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004586 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004587 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004588 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004589 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004590 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004591 }
Allen Kay8140a952011-10-14 12:32:17 -07004592 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004593
Joerg Roedel00a77de2015-03-26 13:43:08 +01004594 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004595 domain->geometry.aperture_start = 0;
4596 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4597 domain->geometry.force_aperture = true;
4598
Joerg Roedel00a77de2015-03-26 13:43:08 +01004599 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004600}
Kay, Allen M38717942008-09-09 18:37:29 +03004601
Joerg Roedel00a77de2015-03-26 13:43:08 +01004602static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004603{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004604 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004605}
Kay, Allen M38717942008-09-09 18:37:29 +03004606
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004607static int intel_iommu_attach_device(struct iommu_domain *domain,
4608 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004609{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004610 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004611 struct intel_iommu *iommu;
4612 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004613 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004614
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004615 if (device_is_rmrr_locked(dev)) {
4616 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4617 return -EPERM;
4618 }
4619
David Woodhouse7207d8f2014-03-09 16:31:06 -07004620 /* normally dev is not mapped */
4621 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004622 struct dmar_domain *old_domain;
4623
David Woodhouse1525a292014-03-06 16:19:30 +00004624 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004625 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004626 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004627 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004628 else
4629 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004630
4631 if (!domain_type_is_vm_or_si(old_domain) &&
4632 list_empty(&old_domain->devices))
4633 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004634 }
4635 }
4636
David Woodhouse156baca2014-03-09 14:00:57 -07004637 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004638 if (!iommu)
4639 return -ENODEV;
4640
4641 /* check if this iommu agaw is sufficient for max mapped address */
4642 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004643 if (addr_width > cap_mgaw(iommu->cap))
4644 addr_width = cap_mgaw(iommu->cap);
4645
4646 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004647 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004649 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004650 return -EFAULT;
4651 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004652 dmar_domain->gaw = addr_width;
4653
4654 /*
4655 * Knock out extra levels of page tables if necessary
4656 */
4657 while (iommu->agaw < dmar_domain->agaw) {
4658 struct dma_pte *pte;
4659
4660 pte = dmar_domain->pgd;
4661 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004662 dmar_domain->pgd = (struct dma_pte *)
4663 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004664 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004665 }
4666 dmar_domain->agaw--;
4667 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004668
David Woodhouse5913c9b2014-03-09 16:27:31 -07004669 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004670}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004671
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004672static void intel_iommu_detach_device(struct iommu_domain *domain,
4673 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004674{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004675 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004676}
Kay, Allen M38717942008-09-09 18:37:29 +03004677
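/*
 * IOMMU API map callback: translate IOMMU_* protection flags into DMA
 * PTE bits, grow the domain's max_addr if needed and install the
 * mapping with domain_pfn_mapping().
 */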
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004678static int intel_iommu_map(struct iommu_domain *domain,
4679 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004680 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004681{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004682 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004683 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004684 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004685 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004686
Joerg Roedeldde57a22008-12-03 15:04:09 +01004687 if (iommu_prot & IOMMU_READ)
4688 prot |= DMA_PTE_READ;
4689 if (iommu_prot & IOMMU_WRITE)
4690 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004691 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4692 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004693
David Woodhouse163cc522009-06-28 00:51:17 +01004694 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004695 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004696 u64 end;
4697
4698 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004699 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004700 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004701 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004703 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004704 return -EFAULT;
4705 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004706 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004707 }
David Woodhousead051222009-06-28 14:22:28 +01004708 /* Round up size to next multiple of PAGE_SIZE, if it and
4709 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004710 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004711 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4712 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004713 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004714}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004715
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004716static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004717 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004718{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004719 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004720 struct page *freelist = NULL;
4721 struct intel_iommu *iommu;
4722 unsigned long start_pfn, last_pfn;
4723 unsigned int npages;
4724 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004725
David Woodhouse5cf0a762014-03-19 16:07:49 +00004726 /* Cope with horrid API which requires us to unmap more than the
4727 size argument if it happens to be a large-page mapping. */
4728 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4729 BUG();
4730
4731 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4732 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4733
David Woodhouseea8ea462014-03-05 17:09:32 +00004734 start_pfn = iova >> VTD_PAGE_SHIFT;
4735 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4736
4737 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4738
4739 npages = last_pfn - start_pfn + 1;
4740
4741 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4742 iommu = g_iommus[iommu_id];
4743
4744 /*
4745 * find bit position of dmar_domain
4746 */
4747 ndomains = cap_ndoms(iommu->cap);
4748 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4749 if (iommu->domains[num] == dmar_domain)
4750 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4751 npages, !freelist, 0);
4752 }
4753
4754 }
4755
4756 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004757
David Woodhouse163cc522009-06-28 00:51:17 +01004758 if (dmar_domain->max_addr == iova + size)
4759 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004760
David Woodhouse5cf0a762014-03-19 16:07:49 +00004761 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004762}
Kay, Allen M38717942008-09-09 18:37:29 +03004763
Joerg Roedeld14d6572008-12-03 15:06:57 +01004764static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304765 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004766{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004767 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004768 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004769 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004770 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004771
David Woodhouse5cf0a762014-03-19 16:07:49 +00004772 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004773 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004774 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004775
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004776 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004777}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004778
Joerg Roedel5d587b82014-09-05 10:50:45 +02004779static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004780{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004781 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004782 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004783 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004784 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004785
Joerg Roedel5d587b82014-09-05 10:50:45 +02004786 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004787}
4788
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004789static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004790{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004791 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004792 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004793 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004794
Alex Williamsona5459cf2014-06-12 16:12:31 -06004795 iommu = device_to_iommu(dev, &bus, &devfn);
4796 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004797 return -ENODEV;
4798
Alex Williamsona5459cf2014-06-12 16:12:31 -06004799 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004800
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004801 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004802
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004803 if (IS_ERR(group))
4804 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004805
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004806 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004807 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004808}
4809
4810static void intel_iommu_remove_device(struct device *dev)
4811{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004812 struct intel_iommu *iommu;
4813 u8 bus, devfn;
4814
4815 iommu = device_to_iommu(dev, &bus, &devfn);
4816 if (!iommu)
4817 return;
4818
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004819 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004820
4821 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004822}
4823
Thierry Redingb22f6432014-06-27 09:03:12 +02004824static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004825 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004826 .domain_alloc = intel_iommu_domain_alloc,
4827 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004828 .attach_dev = intel_iommu_attach_device,
4829 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004830 .map = intel_iommu_map,
4831 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004832 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004833 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004834 .add_device = intel_iommu_add_device,
4835 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004836 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004837};
David Woodhouse9af88142009-02-13 23:18:03 +00004838
Daniel Vetter94526182013-01-20 23:50:13 +01004839static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4840{
4841 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004842 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01004843 dmar_map_gfx = 0;
4844}
4845
4846DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4847DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4848DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4849DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4850DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4851DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4852DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4853
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004854static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004855{
4856 /*
4857 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004858 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004859 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004860 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00004861 rwbf_quirk = 1;
4862}
4863
4864DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004865DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4866DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4867DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4868DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4869DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4870DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004871
Adam Jacksoneecfd572010-08-25 21:17:34 +01004872#define GGC 0x52
4873#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4874#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4875#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4876#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4877#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4878#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4879#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4880#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4881
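/*
 * Calpella/Ironlake integrated graphics: if the BIOS allocated no
 * shadow GTT, graphics DMA remapping cannot work, so disable it;
 * otherwise disable batched (deferred) IOTLB flushing by forcing
 * strict mode.
 */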
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004882static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004883{
4884 unsigned short ggc;
4885
Adam Jacksoneecfd572010-08-25 21:17:34 +01004886 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004887 return;
4888
Adam Jacksoneecfd572010-08-25 21:17:34 +01004889 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004890 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01004891 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004892 } else if (dmar_map_gfx) {
4893 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004894 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004895 intel_iommu_strict = 1;
4896 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004897}
4898DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4899DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4900DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4901DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4902
David Woodhousee0fc7e02009-09-30 09:12:17 -07004903/* On Tylersburg chipsets, some BIOSes have been known to enable the
4904 ISOCH DMAR unit for the Azalia sound device, but not give it any
4905 TLB entries, which causes it to deadlock. Check for that. We do
4906 this in a function called from init_dmars(), instead of in a PCI
4907 quirk, because we don't want to print the obnoxious "BIOS broken"
4908 message if VT-d is actually disabled.
4909*/
4910static void __init check_tylersburg_isoch(void)
4911{
4912 struct pci_dev *pdev;
4913 uint32_t vtisochctrl;
4914
4915 /* If there's no Azalia in the system anyway, forget it. */
4916 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4917 if (!pdev)
4918 return;
4919 pci_dev_put(pdev);
4920
4921 /* System Management Registers. Might be hidden, in which case
4922 we can't do the sanity check. But that's OK, because the
4923 known-broken BIOSes _don't_ actually hide it, so far. */
4924 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4925 if (!pdev)
4926 return;
4927
4928 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4929 pci_dev_put(pdev);
4930 return;
4931 }
4932
4933 pci_dev_put(pdev);
4934
4935 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4936 if (vtisochctrl & 1)
4937 return;
4938
4939 /* Drop all bits other than the number of TLB entries */
4940 vtisochctrl &= 0x1c;
4941
4942 /* If we have the recommended number of TLB entries (16), fine. */
4943 if (vtisochctrl == 0x10)
4944 return;
4945
4946 /* Zero TLB entries? You get to ride the short bus to school. */
4947 if (!vtisochctrl) {
4948 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4949 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4950 dmi_get_system_info(DMI_BIOS_VENDOR),
4951 dmi_get_system_info(DMI_BIOS_VERSION),
4952 dmi_get_system_info(DMI_PRODUCT_VERSION));
4953 iommu_identity_mapping |= IDENTMAP_AZALIA;
4954 return;
4955 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004956
4957 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07004958 vtisochctrl);
4959}