/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
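
/*
 * In other words: ~0xFFFUL clears bits 0-11 and sets every bit from 12
 * upward, so 4KiB (bit 12), 8KiB (bit 13), ..., 2MiB (bit 21) and
 * 1GiB (bit 30) are all advertised to the IOMMU core as supported.
 */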

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
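
/*
 * Worked example: DEFAULT_DOMAIN_ADDRESS_WIDTH is 48, so
 * width_to_agaw(48) == 2 and agaw_to_level(2) == 4.  A 48-bit guest
 * address space thus uses a 4-level page table: 4 levels * 9 index
 * bits (LEVEL_STRIDE) + 12 bits of page offset == 48 bits.
 */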

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
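
/*
 * For instance, at level 2 level_to_offset_bits() is 9, so
 * level_size(2) == 512 DMA pages (2MiB) and pfn_level_offset() picks
 * bits 9-17 of the pfn as the index into that level's table.
 */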

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
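
/*
 * On x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above
 * are no-ops; they only shift on kernels whose MM page size is larger
 * than the 4KiB VT-d page (e.g. a hypothetical 16KiB-page build, where
 * one MM pfn covers four DMA pfns).
 */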

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}
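
/*
 * Layout note: the root table is one 4KiB page of 256 16-byte root
 * entries, one per PCI bus number; each present entry points to a
 * 4KiB context table of 256 entries indexed by devfn.
 */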

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
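
/*
 * A typical programming sequence for these helpers (sketch only; the
 * authoritative caller is the context-mapping code later in this file):
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */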

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
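
/*
 * Example: booting with "intel_iommu=on,strict" both enables the IOMMU
 * and disables batched IOTLB flushing; each loop iteration above
 * consumes one comma-separated token from the command line.
 */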

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
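
/*
 * Worked example: if cap_sagaw() reports only bit 2 set (4-level,
 * 48-bit AGAW), then __iommu_calculate_agaw(iommu, 64) starts at
 * width_to_agaw(64) == 3, finds bit 3 clear, and settles on agaw 2.
 */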

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
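
/*
 * Matching logic, in words: a device is claimed by a DRHD unit either
 * because it is listed explicitly in that unit's device scope, because
 * a listed bridge's subordinate bus range covers the device's bus, or
 * because the unit has the include_all (INCLUDE_PCI_ALL) flag set.
 */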

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
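
/*
 * In other words: target_level == 0 asks for the live leaf entry at
 * whatever level it exists (stopping at a superpage or a non-present
 * entry), while target_level N descends to exactly level N, allocating
 * missing intermediate tables on the way.  The cmpxchg64() keeps the
 * walk safe against a concurrent walker installing the same table.
 */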

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; this should be followed by a TLB flush */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}
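
/*
 * Note on the return value (a reading of the code above, not a spec):
 * (large_page - 1) * 9 is the page order of the last PTE size cleared,
 * e.g. 0 for 4KiB leaves and 9 for 2MiB superpages, which lets callers
 * size the subsequent IOTLB invalidation accordingly.
 */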

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
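
/*
 * Example of the PSI encoding above: flushing 8 pages starting at DMA
 * address A uses size_order 3 in IVA_REG, and the hardware expects A
 * to be naturally aligned to that 8-page (32KiB) range.
 */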

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require flush. However, device IOTLB doesn't need to be flushed
	 * in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
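
/*
 * For example, a request to flush 9 pages rounds up to
 * mask = ilog2(16) == 4, i.e. a 16-page PSI invalidation; if 4 exceeds
 * cap_max_amask_val(), the whole domain is flushed (DSI) instead.
 */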

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
		 ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
1402
1403static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001404static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001405
Joseph Cihula51a63e62011-03-21 11:04:24 -07001406static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407{
1408 struct pci_dev *pdev = NULL;
1409 struct iova *iova;
1410 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411
David Millerf6611972008-02-06 01:36:23 -08001412 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413
Mark Gross8a443df2008-03-04 14:59:31 -08001414 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1415 &reserved_rbtree_key);
1416
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001417 /* IOAPIC ranges shouldn't be accessed by DMA */
1418 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1419 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001420 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001422 return -ENODEV;
1423 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424
1425 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1426 for_each_pci_dev(pdev) {
1427 struct resource *r;
1428
1429 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1430 r = &pdev->resource[i];
1431 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1432 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001433 iova = reserve_iova(&reserved_iova_list,
1434 IOVA_PFN(r->start),
1435 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001436 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001438 return -ENODEV;
1439 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440 }
1441 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001442 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443}
1444
1445static void domain_reserve_special_ranges(struct dmar_domain *domain)
1446{
1447 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1448}
1449
1450static inline int guestwidth_to_adjustwidth(int gaw)
1451{
1452 int agaw;
1453 int r = (gaw - 12) % 9;
1454
1455 if (r == 0)
1456 agaw = gaw;
1457 else
1458 agaw = gaw + 9 - r;
1459 if (agaw > 64)
1460 agaw = 64;
1461 return agaw;
1462}
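/*
 * Worked examples (illustrative): the rounding above makes the width
 * beyond the 12-bit page offset a multiple of the 9-bit stride of one
 * page-table level, so gaw 30 -> agaw 30, gaw 32 -> 39, gaw 40 -> 48,
 * gaw 48 -> 48; anything that would exceed 64 is clamped to 64.
 */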
1463
1464static int domain_init(struct dmar_domain *domain, int guest_width)
1465{
1466 struct intel_iommu *iommu;
1467 int adjust_width, agaw;
1468 unsigned long sagaw;
1469
David Millerf6611972008-02-06 01:36:23 -08001470 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001471 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001472
1473 domain_reserve_special_ranges(domain);
1474
1475 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001476 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477 if (guest_width > cap_mgaw(iommu->cap))
1478 guest_width = cap_mgaw(iommu->cap);
1479 domain->gaw = guest_width;
1480 adjust_width = guestwidth_to_adjustwidth(guest_width);
1481 agaw = width_to_agaw(adjust_width);
1482 sagaw = cap_sagaw(iommu->cap);
1483 if (!test_bit(agaw, &sagaw)) {
1484 /* hardware doesn't support it, choose a bigger one */
1485 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1486 agaw = find_next_bit(&sagaw, 5, agaw);
1487 if (agaw >= 5)
1488 return -ENODEV;
1489 }
1490 domain->agaw = agaw;
1491 INIT_LIST_HEAD(&domain->devices);
1492
Weidong Han8e6040972008-12-08 15:49:06 +08001493 if (ecap_coherent(iommu->ecap))
1494 domain->iommu_coherency = 1;
1495 else
1496 domain->iommu_coherency = 0;
1497
Sheng Yang58c610b2009-03-18 15:33:05 +08001498 if (ecap_sc_support(iommu->ecap))
1499 domain->iommu_snooping = 1;
1500 else
1501 domain->iommu_snooping = 0;
1502
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001503 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001504 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001505 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001506
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001507 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001508 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 if (!domain->pgd)
1510 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001511 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001512 return 0;
1513}
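/*
 * Illustrative AGAW selection (assuming 4KiB pages and 9 bits per level):
 * an adjusted width of 48 maps to agaw index 2 (4-level tables).  If bit 2
 * of cap_sagaw() is clear, the find_next_bit() above picks the next larger
 * supported width, e.g. index 3 for 5-level (57-bit) tables.
 */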
1514
1515static void domain_exit(struct dmar_domain *domain)
1516{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001517 struct dmar_drhd_unit *drhd;
1518 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519
1520	/* Domain 0 is reserved, so don't process it */
1521 if (!domain)
1522 return;
1523
Alex Williamson7b668352011-05-24 12:02:41 +01001524 /* Flush any lazy unmaps that may reference this domain */
1525 if (!intel_iommu_strict)
1526 flush_unmaps_timeout(0);
1527
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001528 domain_remove_dev_info(domain);
1529 /* destroy iovas */
1530 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531
1532 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001533 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001534
1535 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001536 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001537
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001538 for_each_active_iommu(iommu, drhd)
Mike Travis1b198bb2012-03-05 15:05:16 -08001539 if (test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001540 iommu_detach_domain(domain, iommu);
1541
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 free_domain_mem(domain);
1543}
1544
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001545static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1546 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001547{
1548 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001549 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001550 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001551 struct dma_pte *pgd;
1552 unsigned long num;
1553 unsigned long ndomains;
1554 int id;
1555 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001556 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001557
1558 pr_debug("Set context mapping for %02x:%02x.%d\n",
1559 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001560
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001561 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001562 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1563 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001564
David Woodhouse276dbf992009-04-04 01:45:37 +01001565 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001566 if (!iommu)
1567 return -ENODEV;
1568
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001569 context = device_to_context_entry(iommu, bus, devfn);
1570 if (!context)
1571 return -ENOMEM;
1572 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001573 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574 spin_unlock_irqrestore(&iommu->lock, flags);
1575 return 0;
1576 }
1577
Weidong Hanea6606b2008-12-08 23:08:15 +08001578 id = domain->id;
1579 pgd = domain->pgd;
1580
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001581 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1582 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001583 int found = 0;
1584
1585 /* find an available domain id for this device in iommu */
1586 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001587 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001588 if (iommu->domains[num] == domain) {
1589 id = num;
1590 found = 1;
1591 break;
1592 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001593 }
1594
1595 if (found == 0) {
1596 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1597 if (num >= ndomains) {
1598 spin_unlock_irqrestore(&iommu->lock, flags);
1599 printk(KERN_ERR "IOMMU: no free domain ids\n");
1600 return -EFAULT;
1601 }
1602
1603 set_bit(num, iommu->domain_ids);
1604 iommu->domains[num] = domain;
1605 id = num;
1606 }
1607
1608 /* Skip top levels of page tables for
1609	 * an iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001610 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001611 */
Chris Wright1672af12009-12-02 12:06:34 -08001612 if (translation != CONTEXT_TT_PASS_THROUGH) {
1613 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1614 pgd = phys_to_virt(dma_pte_addr(pgd));
1615 if (!dma_pte_present(pgd)) {
1616 spin_unlock_irqrestore(&iommu->lock, flags);
1617 return -ENOMEM;
1618 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001619 }
1620 }
1621 }
1622
1623 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001624
Yu Zhao93a23a72009-05-18 13:51:37 +08001625 if (translation != CONTEXT_TT_PASS_THROUGH) {
1626 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1627 translation = info ? CONTEXT_TT_DEV_IOTLB :
1628 CONTEXT_TT_MULTI_LEVEL;
1629 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001630 /*
1631 * In pass through mode, AW must be programmed to indicate the largest
1632 * AGAW value supported by hardware. And ASR is ignored by hardware.
1633 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001634 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001635 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001636 else {
1637 context_set_address_root(context, virt_to_phys(pgd));
1638 context_set_address_width(context, iommu->agaw);
1639 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001640
1641 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001642 context_set_fault_enable(context);
1643 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001644 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001646 /*
1647 * It's a non-present to present mapping. If hardware doesn't cache
1648	 * non-present entries we only need to flush the write-buffer. If it
1649	 * _does_ cache non-present entries, then it does so in the special
1650 * domain #0, which we have to flush:
1651 */
1652 if (cap_caching_mode(iommu->cap)) {
1653 iommu->flush.flush_context(iommu, 0,
1654 (((u16)bus) << 8) | devfn,
1655 DMA_CCMD_MASK_NOBIT,
1656 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001657 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001658 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001660 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001661 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001662 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001663
1664 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001665 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001666 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001667 if (domain->iommu_count == 1)
1668 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001669 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001670 }
1671 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 return 0;
1673}
1674
1675static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001676domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1677 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678{
1679 int ret;
1680 struct pci_dev *tmp, *parent;
1681
David Woodhouse276dbf992009-04-04 01:45:37 +01001682 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001683 pdev->bus->number, pdev->devfn,
1684 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 if (ret)
1686 return ret;
1687
1688 /* dependent device mapping */
1689 tmp = pci_find_upstream_pcie_bridge(pdev);
1690 if (!tmp)
1691 return 0;
1692 /* Secondary interface's bus number and devfn 0 */
1693 parent = pdev->bus->self;
1694 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001695 ret = domain_context_mapping_one(domain,
1696 pci_domain_nr(parent->bus),
1697 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001698 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 if (ret)
1700 return ret;
1701 parent = parent->bus->self;
1702 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001703 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001705 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001706 tmp->subordinate->number, 0,
1707 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001708 else /* this is a legacy PCI bridge */
1709 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001710 pci_domain_nr(tmp->bus),
1711 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001712 tmp->devfn,
1713 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714}
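/*
 * Illustrative walk (hypothetical topology): for a conventional-PCI
 * device behind a PCIe-to-PCI bridge, the code above installs a context
 * entry for the device itself, then one for every bridge between it and
 * the PCIe bridge, and finally one for (secondary bus, devfn 0), since
 * transactions forwarded by such a bridge may carry that source-id.
 */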
1715
Weidong Han5331fe62008-12-08 23:00:00 +08001716static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717{
1718 int ret;
1719 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001720 struct intel_iommu *iommu;
1721
David Woodhouse276dbf992009-04-04 01:45:37 +01001722 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1723 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001724 if (!iommu)
1725 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001726
David Woodhouse276dbf992009-04-04 01:45:37 +01001727 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728 if (!ret)
1729 return ret;
1730 /* dependent device mapping */
1731 tmp = pci_find_upstream_pcie_bridge(pdev);
1732 if (!tmp)
1733 return ret;
1734 /* Secondary interface's bus number and devfn 0 */
1735 parent = pdev->bus->self;
1736 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001737 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001738 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739 if (!ret)
1740 return ret;
1741 parent = parent->bus->self;
1742 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001743 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001744 return device_context_mapped(iommu, tmp->subordinate->number,
1745 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001746 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001747 return device_context_mapped(iommu, tmp->bus->number,
1748 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001749}
1750
Fenghua Yuf5329592009-08-04 15:09:37 -07001751/* Returns a number of VTD pages, but aligned to MM page size */
1752static inline unsigned long aligned_nrpages(unsigned long host_addr,
1753 size_t size)
1754{
1755 host_addr &= ~PAGE_MASK;
1756 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1757}
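/*
 * Worked example (illustrative): with 4KiB MM pages, offset 0x800 and
 * size 0x1000 give PAGE_ALIGN(0x1800) == 0x2000, i.e. two VT-d pages.
 * With 64KiB MM pages the same request rounds up to one MM page and
 * returns 0x10000 >> 12 == 16 VT-d pages -- hence "aligned to MM page
 * size".
 */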
1758
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001759/* Return largest possible superpage level for a given mapping */
1760static inline int hardware_largepage_caps(struct dmar_domain *domain,
1761 unsigned long iov_pfn,
1762 unsigned long phy_pfn,
1763 unsigned long pages)
1764{
1765 int support, level = 1;
1766 unsigned long pfnmerge;
1767
1768 support = domain->iommu_superpage;
1769
1770 /* To use a large page, the virtual *and* physical addresses
1771 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1772 of them will mean we have to use smaller pages. So just
1773 merge them and check both at once. */
1774 pfnmerge = iov_pfn | phy_pfn;
1775
1776 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1777 pages >>= VTD_STRIDE_SHIFT;
1778 if (!pages)
1779 break;
1780 pfnmerge >>= VTD_STRIDE_SHIFT;
1781 level++;
1782 support--;
1783 }
1784 return level;
1785}
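/*
 * Worked example (illustrative, assuming VTD_STRIDE_SHIFT == 9): iov_pfn
 * 0x200 and phy_pfn 0x400 merge to 0x600, whose low 9 bits are clear, so
 * with pages >= 512 and superpage support the loop returns level 2 (one
 * 2MiB page).  With pages == 511 the shift empties the count first and
 * the function stays at level 1 (4KiB pages).
 */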
1786
David Woodhouse9051aa02009-06-29 12:30:54 +01001787static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1788 struct scatterlist *sg, unsigned long phys_pfn,
1789 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001790{
1791 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001792 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001793 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001794 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001795 unsigned int largepage_lvl = 0;
1796 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001797
1798 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1799
1800 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1801 return -EINVAL;
1802
1803 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1804
David Woodhouse9051aa02009-06-29 12:30:54 +01001805 if (sg)
1806 sg_res = 0;
1807 else {
1808 sg_res = nr_pages + 1;
1809 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1810 }
1811
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001812 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001813 uint64_t tmp;
1814
David Woodhousee1605492009-06-29 11:17:38 +01001815 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001816 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001817 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1818 sg->dma_length = sg->length;
1819 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001820 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001821 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001822
David Woodhousee1605492009-06-29 11:17:38 +01001823 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001824 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1825
1826 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001827 if (!pte)
1828 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001829			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001830 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001831 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001832 /* Ensure that old small page tables are removed to make room
1833	 for the superpage, if they exist. */
1834 dma_pte_clear_range(domain, iov_pfn,
1835 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1836 dma_pte_free_pagetable(domain, iov_pfn,
1837 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1838 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001839 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001840 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001841
David Woodhousee1605492009-06-29 11:17:38 +01001842 }
1843	 /* We don't need a lock here; nobody else
1844 * touches the iova range
1845 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001846 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001847 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001848 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001849 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1850 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001851 if (dumps) {
1852 dumps--;
1853 debug_dma_dump_mappings(NULL);
1854 }
1855 WARN_ON(1);
1856 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001857
1858 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1859
1860 BUG_ON(nr_pages < lvl_pages);
1861 BUG_ON(sg_res < lvl_pages);
1862
1863 nr_pages -= lvl_pages;
1864 iov_pfn += lvl_pages;
1865 phys_pfn += lvl_pages;
1866 pteval += lvl_pages * VTD_PAGE_SIZE;
1867 sg_res -= lvl_pages;
1868
1869 /* If the next PTE would be the first in a new page, then we
1870 need to flush the cache on the entries we've just written.
1871 And then we'll need to recalculate 'pte', so clear it and
1872 let it get set again in the if (!pte) block above.
1873
1874 If we're done (!nr_pages) we need to flush the cache too.
1875
1876 Also if we've been setting superpages, we may need to
1877 recalculate 'pte' and switch back to smaller pages for the
1878 end of the mapping, if the trailing size is not enough to
1879 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001880 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001881 if (!nr_pages || first_pte_in_page(pte) ||
1882 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001883 domain_flush_cache(domain, first_pte,
1884 (void *)pte - (void *)first_pte);
1885 pte = NULL;
1886 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001887
1888 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001889 sg = sg_next(sg);
1890 }
1891 return 0;
1892}
1893
David Woodhouse9051aa02009-06-29 12:30:54 +01001894static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1895 struct scatterlist *sg, unsigned long nr_pages,
1896 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001897{
David Woodhouse9051aa02009-06-29 12:30:54 +01001898 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1899}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001900
David Woodhouse9051aa02009-06-29 12:30:54 +01001901static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1902 unsigned long phys_pfn, unsigned long nr_pages,
1903 int prot)
1904{
1905 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906}
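/*
 * Usage sketch (illustrative): callers pick the wrapper that matches the
 * source of the physical pages, e.g.
 *
 *	domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT,
 *			   phys >> VTD_PAGE_SHIFT, nr_pages,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * for a contiguous range, or domain_sg_mapping() for a scatterlist; both
 * funnel into __domain_mapping() above.
 */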
1907
Weidong Hanc7151a82008-12-08 22:51:37 +08001908static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909{
Weidong Hanc7151a82008-12-08 22:51:37 +08001910 if (!iommu)
1911 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001912
1913 clear_context_table(iommu, bus, devfn);
1914 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001915 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001916 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917}
1918
David Woodhouse109b9b02012-05-25 17:43:02 +01001919static inline void unlink_domain_info(struct device_domain_info *info)
1920{
1921 assert_spin_locked(&device_domain_lock);
1922 list_del(&info->link);
1923 list_del(&info->global);
1924 if (info->dev)
1925 info->dev->dev.archdata.iommu = NULL;
1926}
1927
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928static void domain_remove_dev_info(struct dmar_domain *domain)
1929{
1930 struct device_domain_info *info;
1931 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001932 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933
1934 spin_lock_irqsave(&device_domain_lock, flags);
1935 while (!list_empty(&domain->devices)) {
1936 info = list_entry(domain->devices.next,
1937 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001938 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001939 spin_unlock_irqrestore(&device_domain_lock, flags);
1940
Yu Zhao93a23a72009-05-18 13:51:37 +08001941 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001942 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001943 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944 free_devinfo_mem(info);
1945
1946 spin_lock_irqsave(&device_domain_lock, flags);
1947 }
1948 spin_unlock_irqrestore(&device_domain_lock, flags);
1949}
1950
1951/*
1952 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001953 * Note: struct pci_dev->dev.archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954 */
Kay, Allen M38717942008-09-09 18:37:29 +03001955static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001956find_domain(struct pci_dev *pdev)
1957{
1958 struct device_domain_info *info;
1959
1960 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001961 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001962 if (info)
1963 return info->domain;
1964 return NULL;
1965}
1966
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967/* domain is initialized */
1968static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1969{
1970 struct dmar_domain *domain, *found = NULL;
1971 struct intel_iommu *iommu;
1972 struct dmar_drhd_unit *drhd;
1973 struct device_domain_info *info, *tmp;
1974 struct pci_dev *dev_tmp;
1975 unsigned long flags;
1976 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001977 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001978 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979
1980 domain = find_domain(pdev);
1981 if (domain)
1982 return domain;
1983
David Woodhouse276dbf992009-04-04 01:45:37 +01001984 segment = pci_domain_nr(pdev->bus);
1985
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001986 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1987 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001988 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001989 bus = dev_tmp->subordinate->number;
1990 devfn = 0;
1991 } else {
1992 bus = dev_tmp->bus->number;
1993 devfn = dev_tmp->devfn;
1994 }
1995 spin_lock_irqsave(&device_domain_lock, flags);
1996 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001997 if (info->segment == segment &&
1998 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999 found = info->domain;
2000 break;
2001 }
2002 }
2003 spin_unlock_irqrestore(&device_domain_lock, flags);
2004		/* pcie-pci bridge already has a domain, use it */
2005 if (found) {
2006 domain = found;
2007 goto found_domain;
2008 }
2009 }
2010
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002011 domain = alloc_domain();
2012 if (!domain)
2013 goto error;
2014
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002015 /* Allocate new domain for the device */
2016 drhd = dmar_find_matched_drhd_unit(pdev);
2017 if (!drhd) {
2018 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2019 pci_name(pdev));
Julia Lawalld2900bd2012-07-24 16:18:14 +02002020 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002021 return NULL;
2022 }
2023 iommu = drhd->iommu;
2024
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002025 ret = iommu_attach_domain(domain, iommu);
2026 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002027 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002028 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002029 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002030
2031 if (domain_init(domain, gaw)) {
2032 domain_exit(domain);
2033 goto error;
2034 }
2035
2036 /* register pcie-to-pci device */
2037 if (dev_tmp) {
2038 info = alloc_devinfo_mem();
2039 if (!info) {
2040 domain_exit(domain);
2041 goto error;
2042 }
David Woodhouse276dbf992009-04-04 01:45:37 +01002043 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002044 info->bus = bus;
2045 info->devfn = devfn;
2046 info->dev = NULL;
2047 info->domain = domain;
2048 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002049 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002050
2051		/* pcie-to-pci bridge already has a domain, use it */
2052 found = NULL;
2053 spin_lock_irqsave(&device_domain_lock, flags);
2054 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002055 if (tmp->segment == segment &&
2056 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002057 found = tmp->domain;
2058 break;
2059 }
2060 }
2061 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002062 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002063 free_devinfo_mem(info);
2064 domain_exit(domain);
2065 domain = found;
2066 } else {
2067 list_add(&info->link, &domain->devices);
2068 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002069 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002070 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002071 }
2072
2073found_domain:
2074 info = alloc_devinfo_mem();
2075 if (!info)
2076 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002077 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002078 info->bus = pdev->bus->number;
2079 info->devfn = pdev->devfn;
2080 info->dev = pdev;
2081 info->domain = domain;
2082 spin_lock_irqsave(&device_domain_lock, flags);
2083	/* somebody else raced with us and set it first */
2084 found = find_domain(pdev);
2085 if (found != NULL) {
2086 spin_unlock_irqrestore(&device_domain_lock, flags);
2087 if (found != domain) {
2088 domain_exit(domain);
2089 domain = found;
2090 }
2091 free_devinfo_mem(info);
2092 return domain;
2093 }
2094 list_add(&info->link, &domain->devices);
2095 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002096 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 spin_unlock_irqrestore(&device_domain_lock, flags);
2098 return domain;
2099error:
2100 /* recheck it here, maybe others set it */
2101 return find_domain(pdev);
2102}
2103
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002104static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002105#define IDENTMAP_ALL 1
2106#define IDENTMAP_GFX 2
2107#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002108
David Woodhouseb2132032009-06-26 18:50:28 +01002109static int iommu_domain_identity_map(struct dmar_domain *domain,
2110 unsigned long long start,
2111 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112{
David Woodhousec5395d52009-06-28 16:35:56 +01002113 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2114 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002115
David Woodhousec5395d52009-06-28 16:35:56 +01002116 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2117 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002118 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002119 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002120 }
2121
David Woodhousec5395d52009-06-28 16:35:56 +01002122 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2123 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002124 /*
2125 * RMRR range might have overlap with physical memory range,
2126 * clear it first
2127 */
David Woodhousec5395d52009-06-28 16:35:56 +01002128 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002129
David Woodhousec5395d52009-06-28 16:35:56 +01002130 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2131 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002132 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002133}
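/*
 * Worked example (illustrative, 4KiB VT-d pages): an RMRR covering
 * 0xa8000000-0xabffffff becomes first_vpfn 0xa8000 and last_vpfn 0xabfff,
 * so 0x4000 pages are reserved in the domain's iovad and mapped 1:1 with
 * read/write permission.
 */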
2134
2135static int iommu_prepare_identity_map(struct pci_dev *pdev,
2136 unsigned long long start,
2137 unsigned long long end)
2138{
2139 struct dmar_domain *domain;
2140 int ret;
2141
David Woodhousec7ab48d2009-06-26 19:10:36 +01002142 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002143 if (!domain)
2144 return -ENOMEM;
2145
David Woodhouse19943b02009-08-04 16:19:20 +01002146 /* For _hardware_ passthrough, don't bother. But for software
2147 passthrough, we do it anyway -- it may indicate a memory
2148	 range which is reserved in E820 and thus didn't get set
2149 up to start with in si_domain */
2150 if (domain == si_domain && hw_pass_through) {
2151 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2152 pci_name(pdev), start, end);
2153 return 0;
2154 }
2155
2156 printk(KERN_INFO
2157 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2158 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002159
David Woodhouse5595b522009-12-02 09:21:55 +00002160 if (end < start) {
2161 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2162 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2163 dmi_get_system_info(DMI_BIOS_VENDOR),
2164 dmi_get_system_info(DMI_BIOS_VERSION),
2165 dmi_get_system_info(DMI_PRODUCT_VERSION));
2166 ret = -EIO;
2167 goto error;
2168 }
2169
David Woodhouse2ff729f2009-08-26 14:25:41 +01002170 if (end >> agaw_to_width(domain->agaw)) {
2171 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2172 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2173 agaw_to_width(domain->agaw),
2174 dmi_get_system_info(DMI_BIOS_VENDOR),
2175 dmi_get_system_info(DMI_BIOS_VERSION),
2176 dmi_get_system_info(DMI_PRODUCT_VERSION));
2177 ret = -EIO;
2178 goto error;
2179 }
David Woodhouse19943b02009-08-04 16:19:20 +01002180
David Woodhouseb2132032009-06-26 18:50:28 +01002181 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182 if (ret)
2183 goto error;
2184
2185 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002186 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002187 if (ret)
2188 goto error;
2189
2190 return 0;
2191
2192 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193 domain_exit(domain);
2194 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002195}
2196
2197static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2198 struct pci_dev *pdev)
2199{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002200 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002201 return 0;
2202 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002203 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002204}
2205
Suresh Siddhad3f13812011-08-23 17:05:25 -07002206#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002207static inline void iommu_prepare_isa(void)
2208{
2209 struct pci_dev *pdev;
2210 int ret;
2211
2212 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2213 if (!pdev)
2214 return;
2215
David Woodhousec7ab48d2009-06-26 19:10:36 +01002216 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002217 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002218
2219 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002220 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2221 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002222
2223}
2224#else
2225static inline void iommu_prepare_isa(void)
2226{
2227 return;
2228}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002229#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002230
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002231static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002232
Matt Kraai071e1372009-08-23 22:30:22 -07002233static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002234{
2235 struct dmar_drhd_unit *drhd;
2236 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002237 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002238
2239 si_domain = alloc_domain();
2240 if (!si_domain)
2241 return -EFAULT;
2242
David Woodhousec7ab48d2009-06-26 19:10:36 +01002243 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002244
2245 for_each_active_iommu(iommu, drhd) {
2246 ret = iommu_attach_domain(si_domain, iommu);
2247 if (ret) {
2248 domain_exit(si_domain);
2249 return -EFAULT;
2250 }
2251 }
2252
2253 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2254 domain_exit(si_domain);
2255 return -EFAULT;
2256 }
2257
2258 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2259
David Woodhouse19943b02009-08-04 16:19:20 +01002260 if (hw)
2261 return 0;
2262
David Woodhousec7ab48d2009-06-26 19:10:36 +01002263 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002264 unsigned long start_pfn, end_pfn;
2265 int i;
2266
2267 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2268 ret = iommu_domain_identity_map(si_domain,
2269 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2270 if (ret)
2271 return ret;
2272 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002273 }
2274
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002275 return 0;
2276}
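/*
 * Illustrative flow: with software passthrough (hw == 0) every online
 * node's memory is identity-mapped into si_domain, e.g. the range
 * starting at PFN 0x100000 is mapped so that DMA address 0x100000000
 * reaches physical 0x100000000.  With hardware passthrough (hw == 1) the
 * page tables are skipped entirely; devices later get
 * CONTEXT_TT_PASS_THROUGH context entries instead.
 */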
2277
2278static void domain_remove_one_dev_info(struct dmar_domain *domain,
2279 struct pci_dev *pdev);
2280static int identity_mapping(struct pci_dev *pdev)
2281{
2282 struct device_domain_info *info;
2283
2284 if (likely(!iommu_identity_mapping))
2285 return 0;
2286
Mike Traviscb452a42011-05-28 13:15:03 -05002287 info = pdev->dev.archdata.iommu;
2288 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2289 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002290
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002291 return 0;
2292}
2293
2294static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002295 struct pci_dev *pdev,
2296 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002297{
2298 struct device_domain_info *info;
2299 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002300 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002301
2302 info = alloc_devinfo_mem();
2303 if (!info)
2304 return -ENOMEM;
2305
2306 info->segment = pci_domain_nr(pdev->bus);
2307 info->bus = pdev->bus->number;
2308 info->devfn = pdev->devfn;
2309 info->dev = pdev;
2310 info->domain = domain;
2311
2312 spin_lock_irqsave(&device_domain_lock, flags);
2313 list_add(&info->link, &domain->devices);
2314 list_add(&info->global, &device_domain_list);
2315 pdev->dev.archdata.iommu = info;
2316 spin_unlock_irqrestore(&device_domain_lock, flags);
2317
David Woodhousee2ad23d2012-05-25 17:42:54 +01002318 ret = domain_context_mapping(domain, pdev, translation);
2319 if (ret) {
2320 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002321 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002322 spin_unlock_irqrestore(&device_domain_lock, flags);
2323 free_devinfo_mem(info);
2324 return ret;
2325 }
2326
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002327 return 0;
2328}
2329
David Woodhouse6941af22009-07-04 18:24:27 +01002330static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2331{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002332 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2333 return 1;
2334
2335 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2336 return 1;
2337
2338 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2339 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002340
David Woodhouse3dfc8132009-07-04 19:11:08 +01002341 /*
2342 * We want to start off with all devices in the 1:1 domain, and
2343 * take them out later if we find they can't access all of memory.
2344 *
2345 * However, we can't do this for PCI devices behind bridges,
2346 * because all PCI devices behind the same bridge will end up
2347 * with the same source-id on their transactions.
2348 *
2349 * Practically speaking, we can't change things around for these
2350 * devices at run-time, because we can't be sure there'll be no
2351 * DMA transactions in flight for any of their siblings.
2352 *
2353 * So PCI devices (unless they're on the root bus) as well as
2354 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2355 * the 1:1 domain, just in _case_ one of their siblings turns out
2356 * not to be able to map all of memory.
2357 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002358 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002359 if (!pci_is_root_bus(pdev->bus))
2360 return 0;
2361 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2362 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002363 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002364 return 0;
2365
2366 /*
2367 * At boot time, we don't yet know if devices will be 64-bit capable.
2368 * Assume that they will -- if they turn out not to be, then we can
2369 * take them out of the 1:1 domain later.
2370 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002371 if (!startup) {
2372 /*
2373 * If the device's dma_mask is less than the system's memory
2374 * size then this is not a candidate for identity mapping.
2375 */
2376 u64 dma_mask = pdev->dma_mask;
2377
2378 if (pdev->dev.coherent_dma_mask &&
2379 pdev->dev.coherent_dma_mask < dma_mask)
2380 dma_mask = pdev->dev.coherent_dma_mask;
2381
2382 return dma_mask >= dma_get_required_mask(&pdev->dev);
2383 }
David Woodhouse6941af22009-07-04 18:24:27 +01002384
2385 return 1;
2386}
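/*
 * Illustrative outcomes: a PCIe NIC with a 64-bit dma_mask passes the
 * dma_get_required_mask() test and stays in the 1:1 domain, while a
 * device limited to 32-bit DMA on a machine with RAM above 4GiB fails it
 * at run time (startup == 0) and is moved to a private domain so its DMA
 * can be remapped below 4GiB.
 */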
2387
Matt Kraai071e1372009-08-23 22:30:22 -07002388static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002389{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002390 struct pci_dev *pdev = NULL;
2391 int ret;
2392
David Woodhouse19943b02009-08-04 16:19:20 +01002393 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002394 if (ret)
2395 return -EFAULT;
2396
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002397 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002398 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002399 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002400 hw ? CONTEXT_TT_PASS_THROUGH :
2401 CONTEXT_TT_MULTI_LEVEL);
2402 if (ret) {
2403 /* device not associated with an iommu */
2404 if (ret == -ENODEV)
2405 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002406 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002407 }
2408 pr_info("IOMMU: %s identity mapping for device %s\n",
2409 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002410 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002411 }
2412
2413 return 0;
2414}
2415
Joseph Cihulab7792602011-05-03 00:08:37 -07002416static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002417{
2418 struct dmar_drhd_unit *drhd;
2419 struct dmar_rmrr_unit *rmrr;
2420 struct pci_dev *pdev;
2421 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002422 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002423
2424 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002425 * for each drhd
2426 * allocate root
2427 * initialize and program root entry to not present
2428 * endfor
2429 */
2430 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002431 /*
2432 * lock not needed as this is only incremented in the single
2433	 * threaded kernel __init code path; all other accesses are
2434	 * read-only
2435 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002436 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2437 g_num_of_iommus++;
2438 continue;
2439 }
2440 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2441 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002442 }
2443
Weidong Hand9630fe2008-12-08 11:06:32 +08002444 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2445 GFP_KERNEL);
2446 if (!g_iommus) {
2447 printk(KERN_ERR "Allocating global iommu array failed\n");
2448 ret = -ENOMEM;
2449 goto error;
2450 }
2451
mark gross80b20dd2008-04-18 13:53:58 -07002452 deferred_flush = kzalloc(g_num_of_iommus *
2453 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2454 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002455 ret = -ENOMEM;
2456 goto error;
2457 }
2458
mark gross5e0d2a62008-03-04 15:22:08 -08002459 for_each_drhd_unit(drhd) {
2460 if (drhd->ignored)
2461 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002462
2463 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002464 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002465
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002466 ret = iommu_init_domains(iommu);
2467 if (ret)
2468 goto error;
2469
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002470 /*
2471 * TBD:
2472 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002473	 * among all IOMMUs; need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002474 */
2475 ret = iommu_alloc_root_entry(iommu);
2476 if (ret) {
2477 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2478 goto error;
2479 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002480 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002481 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002482 }
2483
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002484 /*
2485 * Start from the sane iommu hardware state.
2486 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002487 for_each_drhd_unit(drhd) {
2488 if (drhd->ignored)
2489 continue;
2490
2491 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002492
2493 /*
2494 * If the queued invalidation is already initialized by us
2495 * (for example, while enabling interrupt-remapping) then
2496 * we got the things already rolling from a sane state.
2497 */
2498 if (iommu->qi)
2499 continue;
2500
2501 /*
2502 * Clear any previous faults.
2503 */
2504 dmar_fault(-1, iommu);
2505 /*
2506 * Disable queued invalidation if supported and already enabled
2507 * before OS handover.
2508 */
2509 dmar_disable_qi(iommu);
2510 }
2511
2512 for_each_drhd_unit(drhd) {
2513 if (drhd->ignored)
2514 continue;
2515
2516 iommu = drhd->iommu;
2517
Youquan Songa77b67d2008-10-16 16:31:56 -07002518 if (dmar_enable_qi(iommu)) {
2519 /*
2520 * Queued Invalidate not enabled, use Register Based
2521 * Invalidate
2522 */
2523 iommu->flush.flush_context = __iommu_flush_context;
2524 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002525 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002526 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002527 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002528 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002529 } else {
2530 iommu->flush.flush_context = qi_flush_context;
2531 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002532 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002533 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002534 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002535 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002536 }
2537 }
2538
David Woodhouse19943b02009-08-04 16:19:20 +01002539 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002540 iommu_identity_mapping |= IDENTMAP_ALL;
2541
Suresh Siddhad3f13812011-08-23 17:05:25 -07002542#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002543 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002544#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002545
2546 check_tylersburg_isoch();
2547
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002548 /*
2549 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002550 * identity mappings for rmrr, gfx, and isa and may fall back to static
2551 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002552 */
David Woodhouse19943b02009-08-04 16:19:20 +01002553 if (iommu_identity_mapping) {
2554 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2555 if (ret) {
2556 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2557 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002558 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002559 }
David Woodhouse19943b02009-08-04 16:19:20 +01002560 /*
2561 * For each rmrr
2562 * for each dev attached to rmrr
2563 * do
2564 * locate drhd for dev, alloc domain for dev
2565 * allocate free domain
2566 * allocate page table entries for rmrr
2567 * if context not allocated for bus
2568 * allocate and init context
2569 * set present in root table for this bus
2570 * init context with domain, translation etc
2571 * endfor
2572 * endfor
2573 */
2574 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2575 for_each_rmrr_units(rmrr) {
2576 for (i = 0; i < rmrr->devices_cnt; i++) {
2577 pdev = rmrr->devices[i];
2578 /*
2579			 * some BIOSes list non-existent devices in the
2580			 * DMAR table.
2581 */
2582 if (!pdev)
2583 continue;
2584 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2585 if (ret)
2586 printk(KERN_ERR
2587 "IOMMU: mapping reserved region failed\n");
2588 }
2589 }
2590
2591 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002592
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002593 /*
2594 * for each drhd
2595 * enable fault log
2596 * global invalidate context cache
2597 * global invalidate iotlb
2598 * enable translation
2599 */
2600 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002601 if (drhd->ignored) {
2602 /*
2603 * we always have to disable PMRs or DMA may fail on
2604 * this device
2605 */
2606 if (force_on)
2607 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002608 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002609 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002610 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002611
2612 iommu_flush_write_buffer(iommu);
2613
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002614 ret = dmar_set_interrupt(iommu);
2615 if (ret)
2616 goto error;
2617
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002618 iommu_set_root_entry(iommu);
2619
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002620 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002621 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002622
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002623 ret = iommu_enable_translation(iommu);
2624 if (ret)
2625 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002626
2627 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002628 }
2629
2630 return 0;
2631error:
2632 for_each_drhd_unit(drhd) {
2633 if (drhd->ignored)
2634 continue;
2635 iommu = drhd->iommu;
2636 free_iommu(iommu);
2637 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002638 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002639 return ret;
2640}
2641
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

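/*
 * Look up the DMA domain for a device, creating it lazily on the first
 * mapping: allocate a dmar_domain if none exists yet and make sure the
 * device's context entry points at it.
 */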
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * The 32-bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * In case a 64-bit DMA device was detached from a VM, the
		 * device is put back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

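/*
 * Common worker for the map_page/map_sg paths: allocate an IOVA range,
 * fill in the domain page tables, then flush (an IOTLB PSI in caching
 * mode, just the write buffer otherwise). Callers normally arrive here
 * through the generic DMA API, e.g. (illustrative sketch only):
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len,
 *					DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		goto out;	(with "out" a hypothetical error label)
 */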
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr .. (paddr + size) might span a partial page, so map the
	 * whole page. Note: if two parts of one page are mapped
	 * separately, we might have two guest addresses mapping to the
	 * same host paddr, but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

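/*
 * Deferred-unmap machinery: instead of invalidating the IOTLB on every
 * unmap, freed IOVAs are queued per-iommu in deferred_flush[] and
 * released here in one batched invalidation, either from the timer or
 * once the queue reaches HIGH_WATER_MARK.
 */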
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

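/*
 * Queue one IOVA for deferred release, and arm a one-shot ~10ms timer
 * the first time the queue becomes non-empty so that flush_unmaps()
 * runs reasonably soon even under light load.
 */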
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu that the iotlb flush operation would
		 * otherwise use up...
		 */
	}
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu that the iotlb flush operation would
		 * otherwise use up...
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

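/*
 * Quirk: some BIOSes assign the wrong DRHD to the Intel(R) QuickData
 * Technology (IOAT) device even though it has its own iommu. Read the
 * real VT-d register base from the host bridge's config space (offset
 * 0xb0) and, if the DMAR table disagrees, taint the kernel and mark
 * the device as untranslated.
 */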
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

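/*
 * Suspend/resume support: VT-d state is lost across S3, so
 * iommu_suspend() saves the fault-event registers of each iommu and
 * iommu_resume() writes them back after init_iommu_hw() has re-enabled
 * translation.
 */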
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

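/*
 * RMRR and ATSR handling is two-phase: the ACPI structures are recorded
 * while the DMAR table is parsed, and the PCI device scopes are only
 * resolved later, from dmar_parse_rmrr_atsr_dev(), once the PCI buses
 * have been enumerated.
 */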
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

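/*
 * A device may use ATS only if some ATSR covers its segment and lists
 * its PCIe root port (or is INCLUDE_ALL); walk up the bus hierarchy to
 * the root port and check it against the ATSR device scope.
 */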
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Here we only respond to the action of a device being unbound from its
 * driver.
 *
 * Added devices are not attached to their DMAR domain here yet. That
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

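/*
 * Main entry point for VT-d initialisation: parse the DMAR table and
 * device scopes, drop units with no usable devices, set everything up
 * via init_dmars(), then install intel_dma_ops and register with the
 * generic IOMMU layer and the PCI bus notifier.
 */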
int __init intel_iommu_init(void)
{
	int ret = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

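/*
 * Detach one PCI device from @domain: clear its context entry (and the
 * entries of any bridges it sits behind), and if it was the last device
 * this domain had on that iommu, drop the iommu from the domain bitmap
 * and, for non-VM domains, release the domain id on that iommu.
 */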
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					    info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

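/*
 * Initialise a domain created through the generic IOMMU API: reserve
 * the special IOVA ranges, derive the AGAW from the requested guest
 * address width and allocate the top-level page directory.
 */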
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

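/*
 * Attach a device to an IOMMU-API domain. If the domain was built with
 * more page-table levels than this iommu supports, the loop below peels
 * off top levels; e.g. a 4-level (48-bit) pgd is reduced to 3 levels
 * (39 bits) by freeing the top directory page.
 */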
static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_iommu *iommu;
        int addr_width;

        /*
         * Normally pdev is not yet context-mapped; if it is, detach it
         * from its old domain before attaching it here.
         */
        if (unlikely(domain_context_mapped(pdev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(pdev);
                if (old_domain) {
                        if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
                            dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
                                domain_remove_one_dev_info(old_domain, pdev);
                        else
                                domain_remove_dev_info(old_domain);
                }
        }

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                printk(KERN_ERR "%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

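/* Detach @dev from @domain, clearing its context entry. */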
static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);

        domain_remove_one_dev_info(dmar_domain, pdev);
}

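/*
 * Create a mapping of @size bytes from physical address @hpa at IOVA
 * @iova.  Reached through the generic iommu_map() entry point, which
 * checks alignment against .pgsize_bitmap before calling us.
 */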
static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot)
{
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int prot = 0;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        printk(KERN_ERR "%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /* Round up size to next multiple of PAGE_SIZE, if it and
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}

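/*
 * Clear the page-table entries covering @iova; the return value is the
 * number of bytes actually unmapped (PAGE_SIZE << order).
 */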
static size_t intel_iommu_unmap(struct iommu_domain *domain,
                                unsigned long iova, size_t size)
{
        struct dmar_domain *dmar_domain = domain->priv;
        int order;

        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                    (iova + size - 1) >> VTD_PAGE_SHIFT);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return PAGE_SIZE << order;
}

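/*
 * Walk the domain's page tables and return the (page-aligned) physical
 * address that @iova maps to, or 0 if it is not mapped.
 */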
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            unsigned long iova)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}

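/*
 * Capability check used by the IOMMU core: cache coherency depends on
 * the hardware's snoop control, interrupt remapping on whether it was
 * enabled at boot.
 */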
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
                                      unsigned long cap)
{
        struct dmar_domain *dmar_domain = domain->priv;

        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return dmar_domain->iommu_snooping;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return irq_remapping_enabled;

        return 0;
}

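/* Swap a held pci_dev reference for a new one, dropping the old. */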
static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
{
        pci_dev_put(*from);
        *from = to;
}

#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

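/*
 * Hook a newly discovered device into an IOMMU group.  Devices that are
 * not isolated from each other (by ACS, or because a bridge or DMA quirk
 * makes their DMA appear to come from another source) must share a
 * group, so walk upstream to the device that actually sources the DMA
 * and join its group, allocating one if necessary.
 */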
static int intel_iommu_add_device(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_dev *bridge, *dma_pdev = NULL;
        struct iommu_group *group;
        int ret;

        if (!device_to_iommu(pci_domain_nr(pdev->bus),
                             pdev->bus->number, pdev->devfn))
                return -ENODEV;

        bridge = pci_find_upstream_pcie_bridge(pdev);
        if (bridge) {
                if (pci_is_pcie(bridge))
                        dma_pdev = pci_get_domain_bus_and_slot(
                                                pci_domain_nr(pdev->bus),
                                                bridge->subordinate->number, 0);
                if (!dma_pdev)
                        dma_pdev = pci_dev_get(bridge);
        } else
                dma_pdev = pci_dev_get(pdev);

        /* Account for quirked devices */
        swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

        /*
         * If it's a multifunction device that does not support our
         * required ACS flags, add to the same group as function 0.
         */
        if (dma_pdev->multifunction &&
            !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
                swap_pci_ref(&dma_pdev,
                             pci_get_slot(dma_pdev->bus,
                                          PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
                                          0)));

        /*
         * Devices on the root bus go through the iommu.  If that's not us,
         * find the next upstream device and test ACS up to the root bus.
         * Finding the next device may require skipping virtual buses.
         */
        while (!pci_is_root_bus(dma_pdev->bus)) {
                struct pci_bus *bus = dma_pdev->bus;

                while (!bus->self) {
                        if (!pci_is_root_bus(bus))
                                bus = bus->parent;
                        else
                                goto root_bus;
                }

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
        }

root_bus:
        group = iommu_group_get(&dma_pdev->dev);
        pci_dev_put(dma_pdev);
        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group))
                        return PTR_ERR(group);
        }

        ret = iommu_group_add_device(group, dev);

        iommu_group_put(group);
        return ret;
}

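/* Undo intel_iommu_add_device(): take @dev back out of its IOMMU group. */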
static void intel_iommu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}

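/*
 * These callbacks back the generic IOMMU API for devices behind a VT-d
 * unit (the ops are registered elsewhere via bus_set_iommu()).  A rough
 * usage sketch from a hypothetical caller, error handling omitted and
 * "pdev" standing for whatever PCI device the caller owns:
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *      iommu_attach_device(dom, &pdev->dev);
 *      iommu_map(dom, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
 *      ...
 *      iommu_unmap(dom, iova, size);
 *      iommu_detach_device(dom, &pdev->dev);
 *      iommu_domain_free(dom);
 */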
static struct iommu_ops intel_iommu_ops = {
        .domain_init    = intel_iommu_domain_init,
        .domain_destroy = intel_iommu_domain_destroy,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
        .map            = intel_iommu_map,
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
        .add_device     = intel_iommu_add_device,
        .remove_device  = intel_iommu_remove_device,
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it:
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;

        /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
        if (dev->revision == 0x07) {
                printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
                dmar_map_gfx = 0;
        }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

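/*
 * GGC is the graphics control register at offset 0x52 in the Intel
 * graphics device's config space; the values below describe how much
 * stolen memory the BIOS assigned to the GTT and whether a VT-d-usable
 * (shadow) GTT was allocated (see the quirk below).
 */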
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)

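/*
 * On Calpella/Ironlake integrated graphics, VT-d translation for the GPU
 * needs a BIOS-allocated shadow GTT; if the BIOS didn't provide one,
 * graphics must be left untranslated entirely.
 */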
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /* System Management Registers. Might be hidden, in which case
           we can't do the sanity check. But that's OK, because the
           known-broken BIOSes _don't_ actually hide it, so far. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
               vtisochctrl);
}