blob: b9d091157884570f636492be2e49e2e346697664 [file] [log] [blame]
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040027#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070028#include <linux/slab.h>
29#include <linux/irq.h>
30#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070031#include <linux/spinlock.h>
32#include <linux/pci.h>
33#include <linux/dmar.h>
34#include <linux/dma-mapping.h>
35#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080036#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030037#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010038#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030039#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010040#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070041#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100042#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020043#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080044#include <linux/memblock.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070045#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090047#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070048
Fenghua Yu5b6985c2008-10-16 18:02:32 -070049#define ROOT_SIZE VTD_PAGE_SIZE
50#define CONTEXT_SIZE VTD_PAGE_SIZE
51
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070052#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070054#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055
56#define IOAPIC_RANGE_START (0xfee00000)
57#define IOAPIC_RANGE_END (0xfeefffff)
58#define IOVA_START_ADDR (0x1000)
59
60#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070062#define MAX_AGAW_WIDTH 64
63
David Woodhouse2ebe3152009-09-19 07:34:04 -070064#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
65#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
66
67/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
68 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
69#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
70 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
71#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070072
Mark McLoughlinf27be032008-11-20 15:49:43 +000073#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070074#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070075#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080076
Andrew Mortondf08cdc2010-09-22 13:05:11 -070077/* page table handling */
78#define LEVEL_STRIDE (9)
79#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
80
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020081/*
82 * This bitmap is used to advertise the page sizes our hardware support
83 * to the IOMMU core, which will then use this information to split
84 * physically contiguous memory regions it is mapping into page sizes
85 * that we support.
86 *
87 * Traditionally the IOMMU core just handed us the mappings directly,
88 * after making sure the size is an order of a 4KiB page and that the
89 * mapping has natural alignment.
90 *
91 * To retain this behavior, we currently advertise that we support
92 * all page sizes that are an order of 4KiB.
93 *
94 * If at some point we'd like to utilize the IOMMU core's new behavior,
95 * we could change this to advertise the real page sizes we support.
96 */
97#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
98
Andrew Mortondf08cdc2010-09-22 13:05:11 -070099static inline int agaw_to_level(int agaw)
100{
101 return agaw + 2;
102}
103
104static inline int agaw_to_width(int agaw)
105{
106 return 30 + agaw * LEVEL_STRIDE;
107}
108
109static inline int width_to_agaw(int width)
110{
111 return (width - 30) / LEVEL_STRIDE;
112}
113
114static inline unsigned int level_to_offset_bits(int level)
115{
116 return (level - 1) * LEVEL_STRIDE;
117}
118
119static inline int pfn_level_offset(unsigned long pfn, int level)
120{
121 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
122}
123
124static inline unsigned long level_mask(int level)
125{
126 return -1UL << level_to_offset_bits(level);
127}
128
129static inline unsigned long level_size(int level)
130{
131 return 1UL << level_to_offset_bits(level);
132}
133
134static inline unsigned long align_to_level(unsigned long pfn, int level)
135{
136 return (pfn + level_size(level) - 1) & level_mask(level);
137}
David Woodhousefd18de52009-05-10 23:57:41 +0100138
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100139static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
140{
141 return 1 << ((lvl - 1) * LEVEL_STRIDE);
142}
143
David Woodhousedd4e8312009-06-27 16:21:20 +0100144/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
145 are never going to work. */
146static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
147{
148 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
149}
150
151static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
152{
153 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
154}
155static inline unsigned long page_to_dma_pfn(struct page *pg)
156{
157 return mm_to_dma_pfn(page_to_pfn(pg));
158}
159static inline unsigned long virt_to_dma_pfn(void *p)
160{
161 return page_to_dma_pfn(virt_to_page(p));
162}
163
Weidong Hand9630fe2008-12-08 11:06:32 +0800164/* global iommu list, set NULL for ignored DMAR units */
165static struct intel_iommu **g_iommus;
166
David Woodhousee0fc7e02009-09-30 09:12:17 -0700167static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000168static int rwbf_quirk;
169
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000170/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700171 * set to 1 to panic kernel if can't successfully enable VT-d
172 * (used when kernel is launched w/ TXT)
173 */
174static int force_on = 0;
175
176/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000177 * 0: Present
178 * 1-11: Reserved
179 * 12-63: Context Ptr (12 - (haw-1))
180 * 64-127: Reserved
181 */
182struct root_entry {
183 u64 val;
184 u64 rsvd1;
185};
186#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
187static inline bool root_present(struct root_entry *root)
188{
189 return (root->val & 1);
190}
191static inline void set_root_present(struct root_entry *root)
192{
193 root->val |= 1;
194}
195static inline void set_root_value(struct root_entry *root, unsigned long value)
196{
197 root->val |= value & VTD_PAGE_MASK;
198}
199
200static inline struct context_entry *
201get_context_addr_from_root(struct root_entry *root)
202{
203 return (struct context_entry *)
204 (root_present(root)?phys_to_virt(
205 root->val & VTD_PAGE_MASK) :
206 NULL);
207}
208
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000209/*
210 * low 64 bits:
211 * 0: present
212 * 1: fault processing disable
213 * 2-3: translation type
214 * 12-63: address space root
215 * high 64 bits:
216 * 0-2: address width
217 * 3-6: aval
218 * 8-23: domain id
219 */
220struct context_entry {
221 u64 lo;
222 u64 hi;
223};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000224
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000225static inline bool context_present(struct context_entry *context)
226{
227 return (context->lo & 1);
228}
229static inline void context_set_present(struct context_entry *context)
230{
231 context->lo |= 1;
232}
233
234static inline void context_set_fault_enable(struct context_entry *context)
235{
236 context->lo &= (((u64)-1) << 2) | 1;
237}
238
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000239static inline void context_set_translation_type(struct context_entry *context,
240 unsigned long value)
241{
242 context->lo &= (((u64)-1) << 4) | 3;
243 context->lo |= (value & 3) << 2;
244}
245
246static inline void context_set_address_root(struct context_entry *context,
247 unsigned long value)
248{
249 context->lo |= value & VTD_PAGE_MASK;
250}
251
252static inline void context_set_address_width(struct context_entry *context,
253 unsigned long value)
254{
255 context->hi |= value & 7;
256}
257
258static inline void context_set_domain_id(struct context_entry *context,
259 unsigned long value)
260{
261 context->hi |= (value & ((1 << 16) - 1)) << 8;
262}
263
264static inline void context_clear_entry(struct context_entry *context)
265{
266 context->lo = 0;
267 context->hi = 0;
268}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000269
Mark McLoughlin622ba122008-11-20 15:49:46 +0000270/*
271 * 0: readable
272 * 1: writable
273 * 2-6: reserved
274 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800275 * 8-10: available
276 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000277 * 12-63: Host physcial address
278 */
279struct dma_pte {
280 u64 val;
281};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000282
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000283static inline void dma_clear_pte(struct dma_pte *pte)
284{
285 pte->val = 0;
286}
287
288static inline void dma_set_pte_readable(struct dma_pte *pte)
289{
290 pte->val |= DMA_PTE_READ;
291}
292
293static inline void dma_set_pte_writable(struct dma_pte *pte)
294{
295 pte->val |= DMA_PTE_WRITE;
296}
297
Sheng Yang9cf066972009-03-18 15:33:07 +0800298static inline void dma_set_pte_snp(struct dma_pte *pte)
299{
300 pte->val |= DMA_PTE_SNP;
301}
302
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000303static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
304{
305 pte->val = (pte->val & ~3) | (prot & 3);
306}
307
308static inline u64 dma_pte_addr(struct dma_pte *pte)
309{
David Woodhousec85994e2009-07-01 19:21:24 +0100310#ifdef CONFIG_64BIT
311 return pte->val & VTD_PAGE_MASK;
312#else
313 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100314 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100315#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000316}
317
David Woodhousedd4e8312009-06-27 16:21:20 +0100318static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000319{
David Woodhousedd4e8312009-06-27 16:21:20 +0100320 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000321}
322
323static inline bool dma_pte_present(struct dma_pte *pte)
324{
325 return (pte->val & 3) != 0;
326}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000327
Allen Kay4399c8b2011-10-14 12:32:46 -0700328static inline bool dma_pte_superpage(struct dma_pte *pte)
329{
330 return (pte->val & (1 << 7));
331}
332
David Woodhouse75e6bf92009-07-02 11:21:16 +0100333static inline int first_pte_in_page(struct dma_pte *pte)
334{
335 return !((unsigned long)pte & ~VTD_PAGE_MASK);
336}
337
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700338/*
339 * This domain is a statically identity mapping domain.
340 * 1. This domain creats a static 1:1 mapping to all usable memory.
341 * 2. It maps to each iommu if successful.
342 * 3. Each iommu mapps to this domain if successful.
343 */
David Woodhouse19943b02009-08-04 16:19:20 +0100344static struct dmar_domain *si_domain;
345static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700346
Weidong Han3b5410e2008-12-08 09:17:15 +0800347/* devices under the same p2p bridge are owned in one domain */
Mike Daycdc7b832008-12-12 17:16:30 +0100348#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
Weidong Han3b5410e2008-12-08 09:17:15 +0800349
Weidong Han1ce28fe2008-12-08 16:35:39 +0800350/* domain represents a virtual machine, more than one devices
351 * across iommus may be owned in one domain, e.g. kvm guest.
352 */
353#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
354
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700355/* si_domain contains mulitple devices */
356#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
357
Mike Travis1b198bb2012-03-05 15:05:16 -0800358/* define the limit of IOMMUs supported in each domain */
359#ifdef CONFIG_X86
360# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
361#else
362# define IOMMU_UNITS_SUPPORTED 64
363#endif
364
Mark McLoughlin99126f72008-11-20 15:49:47 +0000365struct dmar_domain {
366 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700367 int nid; /* node id */
Mike Travis1b198bb2012-03-05 15:05:16 -0800368 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
369 /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000370
371 struct list_head devices; /* all devices' list */
372 struct iova_domain iovad; /* iova's that belong to this domain */
373
374 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000375 int gaw; /* max guest address width */
376
377 /* adjusted guest address width, 0 is level 2 30-bit */
378 int agaw;
379
Weidong Han3b5410e2008-12-08 09:17:15 +0800380 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800381
382 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800383 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800384 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100385 int iommu_superpage;/* Level of superpages supported:
386 0 == 4KiB (no superpages), 1 == 2MiB,
387 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800388 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800389 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000390};
391
Mark McLoughlina647dac2008-11-20 15:49:48 +0000392/* PCI domain-device relationship */
393struct device_domain_info {
394 struct list_head link; /* link to domain siblings */
395 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100396 int segment; /* PCI domain */
397 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000398 u8 devfn; /* PCI devfn number */
Stefan Assmann45e829e2009-12-03 06:49:24 -0500399 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800400 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000401 struct dmar_domain *domain; /* pointer to domain */
402};
403
mark gross5e0d2a62008-03-04 15:22:08 -0800404static void flush_unmaps_timeout(unsigned long data);
405
406DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
407
mark gross80b20dd2008-04-18 13:53:58 -0700408#define HIGH_WATER_MARK 250
409struct deferred_flush_tables {
410 int next;
411 struct iova *iova[HIGH_WATER_MARK];
412 struct dmar_domain *domain[HIGH_WATER_MARK];
413};
414
415static struct deferred_flush_tables *deferred_flush;
416
mark gross5e0d2a62008-03-04 15:22:08 -0800417/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800418static int g_num_of_iommus;
419
420static DEFINE_SPINLOCK(async_umap_flush_lock);
421static LIST_HEAD(unmaps_to_do);
422
423static int timer_on;
424static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800425
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700426static void domain_remove_dev_info(struct dmar_domain *domain);
427
Suresh Siddhad3f13812011-08-23 17:05:25 -0700428#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800429int dmar_disabled = 0;
430#else
431int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700432#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800433
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200434int intel_iommu_enabled = 0;
435EXPORT_SYMBOL_GPL(intel_iommu_enabled);
436
David Woodhouse2d9e6672010-06-15 10:57:57 +0100437static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700438static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800439static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100440static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700441
David Woodhousec0771df2011-10-14 20:59:46 +0100442int intel_iommu_gfx_mapped;
443EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
444
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700445#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
446static DEFINE_SPINLOCK(device_domain_lock);
447static LIST_HEAD(device_domain_list);
448
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100449static struct iommu_ops intel_iommu_ops;
450
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700451static int __init intel_iommu_setup(char *str)
452{
453 if (!str)
454 return -EINVAL;
455 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800456 if (!strncmp(str, "on", 2)) {
457 dmar_disabled = 0;
458 printk(KERN_INFO "Intel-IOMMU: enabled\n");
459 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700460 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800461 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700462 } else if (!strncmp(str, "igfx_off", 8)) {
463 dmar_map_gfx = 0;
464 printk(KERN_INFO
465 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700466 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800467 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700468 "Intel-IOMMU: Forcing DAC for PCI devices\n");
469 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800470 } else if (!strncmp(str, "strict", 6)) {
471 printk(KERN_INFO
472 "Intel-IOMMU: disable batched IOTLB flush\n");
473 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100474 } else if (!strncmp(str, "sp_off", 6)) {
475 printk(KERN_INFO
476 "Intel-IOMMU: disable supported super page\n");
477 intel_iommu_superpage = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700478 }
479
480 str += strcspn(str, ",");
481 while (*str == ',')
482 str++;
483 }
484 return 0;
485}
486__setup("intel_iommu=", intel_iommu_setup);
487
488static struct kmem_cache *iommu_domain_cache;
489static struct kmem_cache *iommu_devinfo_cache;
490static struct kmem_cache *iommu_iova_cache;
491
Suresh Siddha4c923d42009-10-02 11:01:24 -0700492static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700493{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700494 struct page *page;
495 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700496
Suresh Siddha4c923d42009-10-02 11:01:24 -0700497 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
498 if (page)
499 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700500 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700501}
502
503static inline void free_pgtable_page(void *vaddr)
504{
505 free_page((unsigned long)vaddr);
506}
507
508static inline void *alloc_domain_mem(void)
509{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900510 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700511}
512
Kay, Allen M38717942008-09-09 18:37:29 +0300513static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700514{
515 kmem_cache_free(iommu_domain_cache, vaddr);
516}
517
518static inline void * alloc_devinfo_mem(void)
519{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900520 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700521}
522
523static inline void free_devinfo_mem(void *vaddr)
524{
525 kmem_cache_free(iommu_devinfo_cache, vaddr);
526}
527
528struct iova *alloc_iova_mem(void)
529{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900530 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700531}
532
533void free_iova_mem(struct iova *iova)
534{
535 kmem_cache_free(iommu_iova_cache, iova);
536}
537
Weidong Han1b573682008-12-08 15:34:06 +0800538
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700539static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800540{
541 unsigned long sagaw;
542 int agaw = -1;
543
544 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700545 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800546 agaw >= 0; agaw--) {
547 if (test_bit(agaw, &sagaw))
548 break;
549 }
550
551 return agaw;
552}
553
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700554/*
555 * Calculate max SAGAW for each iommu.
556 */
557int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
558{
559 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
560}
561
562/*
563 * calculate agaw for each iommu.
564 * "SAGAW" may be different across iommus, use a default agaw, and
565 * get a supported less agaw for iommus that don't support the default agaw.
566 */
567int iommu_calculate_agaw(struct intel_iommu *iommu)
568{
569 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
570}
571
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700572/* This functionin only returns single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800573static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
574{
575 int iommu_id;
576
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700577 /* si_domain and vm domain should not get here. */
Weidong Han1ce28fe2008-12-08 16:35:39 +0800578 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700579 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
Weidong Han1ce28fe2008-12-08 16:35:39 +0800580
Mike Travis1b198bb2012-03-05 15:05:16 -0800581 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800582 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
583 return NULL;
584
585 return g_iommus[iommu_id];
586}
587
Weidong Han8e6040972008-12-08 15:49:06 +0800588static void domain_update_iommu_coherency(struct dmar_domain *domain)
589{
590 int i;
591
Alex Williamson2e12bc22011-11-11 17:26:44 -0700592 i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
593
594 domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800595
Mike Travis1b198bb2012-03-05 15:05:16 -0800596 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Weidong Han8e6040972008-12-08 15:49:06 +0800597 if (!ecap_coherent(g_iommus[i]->ecap)) {
598 domain->iommu_coherency = 0;
599 break;
600 }
Weidong Han8e6040972008-12-08 15:49:06 +0800601 }
602}
603
Sheng Yang58c610b2009-03-18 15:33:05 +0800604static void domain_update_iommu_snooping(struct dmar_domain *domain)
605{
606 int i;
607
608 domain->iommu_snooping = 1;
609
Mike Travis1b198bb2012-03-05 15:05:16 -0800610 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Sheng Yang58c610b2009-03-18 15:33:05 +0800611 if (!ecap_sc_support(g_iommus[i]->ecap)) {
612 domain->iommu_snooping = 0;
613 break;
614 }
Sheng Yang58c610b2009-03-18 15:33:05 +0800615 }
616}
617
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100618static void domain_update_iommu_superpage(struct dmar_domain *domain)
619{
Allen Kay8140a952011-10-14 12:32:17 -0700620 struct dmar_drhd_unit *drhd;
621 struct intel_iommu *iommu = NULL;
622 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100623
624 if (!intel_iommu_superpage) {
625 domain->iommu_superpage = 0;
626 return;
627 }
628
Allen Kay8140a952011-10-14 12:32:17 -0700629 /* set iommu_superpage to the smallest common denominator */
630 for_each_active_iommu(iommu, drhd) {
631 mask &= cap_super_page_val(iommu->cap);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100632 if (!mask) {
633 break;
634 }
635 }
636 domain->iommu_superpage = fls(mask);
637}
638
Sheng Yang58c610b2009-03-18 15:33:05 +0800639/* Some capabilities may be different across iommus */
640static void domain_update_iommu_cap(struct dmar_domain *domain)
641{
642 domain_update_iommu_coherency(domain);
643 domain_update_iommu_snooping(domain);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100644 domain_update_iommu_superpage(domain);
Sheng Yang58c610b2009-03-18 15:33:05 +0800645}
646
David Woodhouse276dbf992009-04-04 01:45:37 +0100647static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800648{
649 struct dmar_drhd_unit *drhd = NULL;
650 int i;
651
652 for_each_drhd_unit(drhd) {
653 if (drhd->ignored)
654 continue;
David Woodhouse276dbf992009-04-04 01:45:37 +0100655 if (segment != drhd->segment)
656 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800657
David Woodhouse924b6232009-04-04 00:39:25 +0100658 for (i = 0; i < drhd->devices_cnt; i++) {
Dirk Hohndel288e4872009-01-11 15:33:51 +0000659 if (drhd->devices[i] &&
660 drhd->devices[i]->bus->number == bus &&
Weidong Hanc7151a82008-12-08 22:51:37 +0800661 drhd->devices[i]->devfn == devfn)
662 return drhd->iommu;
David Woodhouse4958c5d2009-04-06 13:30:01 -0700663 if (drhd->devices[i] &&
664 drhd->devices[i]->subordinate &&
David Woodhouse924b6232009-04-04 00:39:25 +0100665 drhd->devices[i]->subordinate->number <= bus &&
Yinghai Lub918c622012-05-17 18:51:11 -0700666 drhd->devices[i]->subordinate->busn_res.end >= bus)
David Woodhouse924b6232009-04-04 00:39:25 +0100667 return drhd->iommu;
668 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800669
670 if (drhd->include_all)
671 return drhd->iommu;
672 }
673
674 return NULL;
675}
676
Weidong Han5331fe62008-12-08 23:00:00 +0800677static void domain_flush_cache(struct dmar_domain *domain,
678 void *addr, int size)
679{
680 if (!domain->iommu_coherency)
681 clflush_cache_range(addr, size);
682}
683
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700684/* Gets context entry for a given bus and devfn */
685static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
686 u8 bus, u8 devfn)
687{
688 struct root_entry *root;
689 struct context_entry *context;
690 unsigned long phy_addr;
691 unsigned long flags;
692
693 spin_lock_irqsave(&iommu->lock, flags);
694 root = &iommu->root_entry[bus];
695 context = get_context_addr_from_root(root);
696 if (!context) {
Suresh Siddha4c923d42009-10-02 11:01:24 -0700697 context = (struct context_entry *)
698 alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700699 if (!context) {
700 spin_unlock_irqrestore(&iommu->lock, flags);
701 return NULL;
702 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700703 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700704 phy_addr = virt_to_phys((void *)context);
705 set_root_value(root, phy_addr);
706 set_root_present(root);
707 __iommu_flush_cache(iommu, root, sizeof(*root));
708 }
709 spin_unlock_irqrestore(&iommu->lock, flags);
710 return &context[devfn];
711}
712
713static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
714{
715 struct root_entry *root;
716 struct context_entry *context;
717 int ret;
718 unsigned long flags;
719
720 spin_lock_irqsave(&iommu->lock, flags);
721 root = &iommu->root_entry[bus];
722 context = get_context_addr_from_root(root);
723 if (!context) {
724 ret = 0;
725 goto out;
726 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000727 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700728out:
729 spin_unlock_irqrestore(&iommu->lock, flags);
730 return ret;
731}
732
733static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
734{
735 struct root_entry *root;
736 struct context_entry *context;
737 unsigned long flags;
738
739 spin_lock_irqsave(&iommu->lock, flags);
740 root = &iommu->root_entry[bus];
741 context = get_context_addr_from_root(root);
742 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000743 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700744 __iommu_flush_cache(iommu, &context[devfn], \
745 sizeof(*context));
746 }
747 spin_unlock_irqrestore(&iommu->lock, flags);
748}
749
750static void free_context_table(struct intel_iommu *iommu)
751{
752 struct root_entry *root;
753 int i;
754 unsigned long flags;
755 struct context_entry *context;
756
757 spin_lock_irqsave(&iommu->lock, flags);
758 if (!iommu->root_entry) {
759 goto out;
760 }
761 for (i = 0; i < ROOT_ENTRY_NR; i++) {
762 root = &iommu->root_entry[i];
763 context = get_context_addr_from_root(root);
764 if (context)
765 free_pgtable_page(context);
766 }
767 free_pgtable_page(iommu->root_entry);
768 iommu->root_entry = NULL;
769out:
770 spin_unlock_irqrestore(&iommu->lock, flags);
771}
772
David Woodhouseb026fd22009-06-28 10:37:25 +0100773static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
Allen Kay4399c8b2011-10-14 12:32:46 -0700774 unsigned long pfn, int target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700775{
David Woodhouseb026fd22009-06-28 10:37:25 +0100776 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700777 struct dma_pte *parent, *pte = NULL;
778 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700779 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700780
781 BUG_ON(!domain->pgd);
David Woodhouseb026fd22009-06-28 10:37:25 +0100782 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700783 parent = domain->pgd;
784
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700785 while (level > 0) {
786 void *tmp_page;
787
David Woodhouseb026fd22009-06-28 10:37:25 +0100788 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700789 pte = &parent[offset];
Allen Kay4399c8b2011-10-14 12:32:46 -0700790 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100791 break;
792 if (level == target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700793 break;
794
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000795 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100796 uint64_t pteval;
797
Suresh Siddha4c923d42009-10-02 11:01:24 -0700798 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700799
David Woodhouse206a73c12009-07-01 19:30:28 +0100800 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700801 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100802
David Woodhousec85994e2009-07-01 19:21:24 +0100803 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400804 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
David Woodhousec85994e2009-07-01 19:21:24 +0100805 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
806 /* Someone else set it while we were thinking; use theirs. */
807 free_pgtable_page(tmp_page);
808 } else {
809 dma_pte_addr(pte);
810 domain_flush_cache(domain, pte, sizeof(*pte));
811 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700812 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000813 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700814 level--;
815 }
816
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700817 return pte;
818}
819
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100820
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700821/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100822static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
823 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100824 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700825{
826 struct dma_pte *parent, *pte = NULL;
827 int total = agaw_to_level(domain->agaw);
828 int offset;
829
830 parent = domain->pgd;
831 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100832 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700833 pte = &parent[offset];
834 if (level == total)
835 return pte;
836
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100837 if (!dma_pte_present(pte)) {
838 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700839 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100840 }
841
842 if (pte->val & DMA_PTE_LARGE_PAGE) {
843 *large_page = total;
844 return pte;
845 }
846
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000847 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700848 total--;
849 }
850 return NULL;
851}
852
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700853/* clear last level pte, a tlb flush should be followed */
Allen Kay292827c2011-10-14 12:31:54 -0700854static int dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100855 unsigned long start_pfn,
856 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700857{
David Woodhouse04b18e62009-06-27 19:15:01 +0100858 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100859 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100860 struct dma_pte *first_pte, *pte;
Allen Kay292827c2011-10-14 12:31:54 -0700861 int order;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700862
David Woodhouse04b18e62009-06-27 19:15:01 +0100863 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
David Woodhouse595badf2009-06-27 22:09:11 +0100864 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700865 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100866
David Woodhouse04b18e62009-06-27 19:15:01 +0100867 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700868 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100869 large_page = 1;
870 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100871 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100872 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100873 continue;
874 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100875 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100876 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100877 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100878 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100879 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
880
David Woodhouse310a5ab2009-06-28 18:52:20 +0100881 domain_flush_cache(domain, first_pte,
882 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700883
884 } while (start_pfn && start_pfn <= last_pfn);
Allen Kay292827c2011-10-14 12:31:54 -0700885
886 order = (large_page - 1) * 9;
887 return order;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700888}
889
890/* free page table pages. last level pte should already be cleared */
891static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100892 unsigned long start_pfn,
893 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700894{
David Woodhouse6660c632009-06-27 22:41:00 +0100895 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhousef3a0a522009-06-30 03:40:07 +0100896 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700897 int total = agaw_to_level(domain->agaw);
898 int level;
David Woodhouse6660c632009-06-27 22:41:00 +0100899 unsigned long tmp;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100900 int large_page = 2;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700901
David Woodhouse6660c632009-06-27 22:41:00 +0100902 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
903 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700904 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700905
David Woodhousef3a0a522009-06-30 03:40:07 +0100906 /* We don't need lock here; nobody else touches the iova range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907 level = 2;
908 while (level <= total) {
David Woodhouse6660c632009-06-27 22:41:00 +0100909 tmp = align_to_level(start_pfn, level);
910
David Woodhousef3a0a522009-06-30 03:40:07 +0100911 /* If we can't even clear one PTE at this level, we're done */
David Woodhouse6660c632009-06-27 22:41:00 +0100912 if (tmp + level_size(level) - 1 > last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700913 return;
914
David Woodhouse59c36282009-09-19 07:36:28 -0700915 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100916 large_page = level;
917 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
918 if (large_page > level)
919 level = large_page + 1;
David Woodhousef3a0a522009-06-30 03:40:07 +0100920 if (!pte) {
921 tmp = align_to_level(tmp + 1, level + 1);
922 continue;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700923 }
David Woodhouse75e6bf92009-07-02 11:21:16 +0100924 do {
David Woodhouse6a43e572009-07-02 12:02:34 +0100925 if (dma_pte_present(pte)) {
926 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
927 dma_clear_pte(pte);
928 }
David Woodhousef3a0a522009-06-30 03:40:07 +0100929 pte++;
930 tmp += level_size(level);
David Woodhouse75e6bf92009-07-02 11:21:16 +0100931 } while (!first_pte_in_page(pte) &&
932 tmp + level_size(level) - 1 <= last_pfn);
933
David Woodhousef3a0a522009-06-30 03:40:07 +0100934 domain_flush_cache(domain, first_pte,
935 (void *)pte - (void *)first_pte);
936
David Woodhouse59c36282009-09-19 07:36:28 -0700937 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700938 level++;
939 }
940 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100941 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700942 free_pgtable_page(domain->pgd);
943 domain->pgd = NULL;
944 }
945}
946
947/* iommu handling */
948static int iommu_alloc_root_entry(struct intel_iommu *iommu)
949{
950 struct root_entry *root;
951 unsigned long flags;
952
Suresh Siddha4c923d42009-10-02 11:01:24 -0700953 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700954 if (!root)
955 return -ENOMEM;
956
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700957 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700958
959 spin_lock_irqsave(&iommu->lock, flags);
960 iommu->root_entry = root;
961 spin_unlock_irqrestore(&iommu->lock, flags);
962
963 return 0;
964}
965
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700966static void iommu_set_root_entry(struct intel_iommu *iommu)
967{
968 void *addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100969 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700970 unsigned long flag;
971
972 addr = iommu->root_entry;
973
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200974 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700975 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
976
David Woodhousec416daa2009-05-10 20:30:58 +0100977 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700978
979 /* Make sure hardware complete it */
980 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100981 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700982
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200983 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700984}
985
986static void iommu_flush_write_buffer(struct intel_iommu *iommu)
987{
988 u32 val;
989 unsigned long flag;
990
David Woodhouse9af88142009-02-13 23:18:03 +0000991 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700992 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700993
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200994 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +0100995 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700996
997 /* Make sure hardware complete it */
998 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100999 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001000
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001001 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001002}
1003
1004/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001005static void __iommu_flush_context(struct intel_iommu *iommu,
1006 u16 did, u16 source_id, u8 function_mask,
1007 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001008{
1009 u64 val = 0;
1010 unsigned long flag;
1011
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001012 switch (type) {
1013 case DMA_CCMD_GLOBAL_INVL:
1014 val = DMA_CCMD_GLOBAL_INVL;
1015 break;
1016 case DMA_CCMD_DOMAIN_INVL:
1017 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1018 break;
1019 case DMA_CCMD_DEVICE_INVL:
1020 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1021 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1022 break;
1023 default:
1024 BUG();
1025 }
1026 val |= DMA_CCMD_ICC;
1027
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001028 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001029 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1030
1031 /* Make sure hardware complete it */
1032 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1033 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1034
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001035 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001036}
1037
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001038/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001039static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1040 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001041{
1042 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1043 u64 val = 0, val_iva = 0;
1044 unsigned long flag;
1045
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001046 switch (type) {
1047 case DMA_TLB_GLOBAL_FLUSH:
1048 /* global flush doesn't need set IVA_REG */
1049 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1050 break;
1051 case DMA_TLB_DSI_FLUSH:
1052 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1053 break;
1054 case DMA_TLB_PSI_FLUSH:
1055 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1056 /* Note: always flush non-leaf currently */
1057 val_iva = size_order | addr;
1058 break;
1059 default:
1060 BUG();
1061 }
1062 /* Note: set drain read/write */
1063#if 0
1064 /*
1065 * This is probably to be super secure.. Looks like we can
1066 * ignore it without any impact.
1067 */
1068 if (cap_read_drain(iommu->cap))
1069 val |= DMA_TLB_READ_DRAIN;
1070#endif
1071 if (cap_write_drain(iommu->cap))
1072 val |= DMA_TLB_WRITE_DRAIN;
1073
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001074 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001075 /* Note: Only uses first TLB reg currently */
1076 if (val_iva)
1077 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1078 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1079
1080 /* Make sure hardware complete it */
1081 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1082 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1083
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001084 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001085
1086 /* check IOTLB invalidation granularity */
1087 if (DMA_TLB_IAIG(val) == 0)
1088 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1089 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1090 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001091 (unsigned long long)DMA_TLB_IIRG(type),
1092 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001093}
1094
Yu Zhao93a23a72009-05-18 13:51:37 +08001095static struct device_domain_info *iommu_support_dev_iotlb(
1096 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001097{
Yu Zhao93a23a72009-05-18 13:51:37 +08001098 int found = 0;
1099 unsigned long flags;
1100 struct device_domain_info *info;
1101 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1102
1103 if (!ecap_dev_iotlb_support(iommu->ecap))
1104 return NULL;
1105
1106 if (!iommu->qi)
1107 return NULL;
1108
1109 spin_lock_irqsave(&device_domain_lock, flags);
1110 list_for_each_entry(info, &domain->devices, link)
1111 if (info->bus == bus && info->devfn == devfn) {
1112 found = 1;
1113 break;
1114 }
1115 spin_unlock_irqrestore(&device_domain_lock, flags);
1116
1117 if (!found || !info->dev)
1118 return NULL;
1119
1120 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1121 return NULL;
1122
1123 if (!dmar_find_matched_atsr_unit(info->dev))
1124 return NULL;
1125
1126 info->iommu = iommu;
1127
1128 return info;
1129}
1130
1131static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1132{
1133 if (!info)
1134 return;
1135
1136 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1137}
1138
1139static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1140{
1141 if (!info->dev || !pci_ats_enabled(info->dev))
1142 return;
1143
1144 pci_disable_ats(info->dev);
1145}
1146
1147static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1148 u64 addr, unsigned mask)
1149{
1150 u16 sid, qdep;
1151 unsigned long flags;
1152 struct device_domain_info *info;
1153
1154 spin_lock_irqsave(&device_domain_lock, flags);
1155 list_for_each_entry(info, &domain->devices, link) {
1156 if (!info->dev || !pci_ats_enabled(info->dev))
1157 continue;
1158
1159 sid = info->bus << 8 | info->devfn;
1160 qdep = pci_ats_queue_depth(info->dev);
1161 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1162 }
1163 spin_unlock_irqrestore(&device_domain_lock, flags);
1164}
1165
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001166static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
Nadav Amit82653632010-04-01 13:24:40 +03001167 unsigned long pfn, unsigned int pages, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001168{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001169 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001170 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001171
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001172 BUG_ON(pages == 0);
1173
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001174 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001175 * Fallback to domain selective flush if no PSI support or the size is
1176 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001177 * PSI requires page size to be 2 ^ x, and the base address is naturally
1178 * aligned to the size
1179 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001180 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1181 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001182 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001183 else
1184 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1185 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001186
1187 /*
Nadav Amit82653632010-04-01 13:24:40 +03001188 * In caching mode, changes of pages from non-present to present require
1189 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001190 */
Nadav Amit82653632010-04-01 13:24:40 +03001191 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001192 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001193}
1194
mark grossf8bab732008-02-08 04:18:38 -08001195static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1196{
1197 u32 pmen;
1198 unsigned long flags;
1199
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001200 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001201 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1202 pmen &= ~DMA_PMEN_EPM;
1203 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1204
1205 /* wait for the protected region status bit to clear */
1206 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1207 readl, !(pmen & DMA_PMEN_PRS), pmen);
1208
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001209 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001210}
1211
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001212static int iommu_enable_translation(struct intel_iommu *iommu)
1213{
1214 u32 sts;
1215 unsigned long flags;
1216
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001217 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001218 iommu->gcmd |= DMA_GCMD_TE;
1219 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001220
1221 /* Make sure hardware complete it */
1222 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001223 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001224
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001225 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001226 return 0;
1227}
1228
1229static int iommu_disable_translation(struct intel_iommu *iommu)
1230{
1231 u32 sts;
1232 unsigned long flag;
1233
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001234 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001235 iommu->gcmd &= ~DMA_GCMD_TE;
1236 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1237
1238 /* Make sure hardware complete it */
1239 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001240 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001241
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001242 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001243 return 0;
1244}
1245
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001246
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001247static int iommu_init_domains(struct intel_iommu *iommu)
1248{
1249 unsigned long ndomains;
1250 unsigned long nlongs;
1251
1252 ndomains = cap_ndoms(iommu->cap);
Masanari Iida68aeb962012-01-25 00:25:52 +09001253 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
Yinghai Lu680a7522010-04-08 19:58:23 +01001254 ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001255 nlongs = BITS_TO_LONGS(ndomains);
1256
Donald Dutile94a91b52009-08-20 16:51:34 -04001257 spin_lock_init(&iommu->lock);
1258
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001259 /* TBD: there might be 64K domains,
1260 * consider other allocation for future chip
1261 */
1262 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1263 if (!iommu->domain_ids) {
1264 printk(KERN_ERR "Allocating domain id array failed\n");
1265 return -ENOMEM;
1266 }
1267 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1268 GFP_KERNEL);
1269 if (!iommu->domains) {
1270 printk(KERN_ERR "Allocating domain array failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001271 return -ENOMEM;
1272 }
1273
1274 /*
1275 * if Caching mode is set, then invalid translations are tagged
1276 * with domainid 0. Hence we need to pre-allocate it.
1277 */
1278 if (cap_caching_mode(iommu->cap))
1279 set_bit(0, iommu->domain_ids);
1280 return 0;
1281}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001282
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283
1284static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001285static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001286
1287void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001288{
1289 struct dmar_domain *domain;
1290 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001291 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001292
Donald Dutile94a91b52009-08-20 16:51:34 -04001293 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001294 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001295 domain = iommu->domains[i];
1296 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001297
Donald Dutile94a91b52009-08-20 16:51:34 -04001298 spin_lock_irqsave(&domain->iommu_lock, flags);
1299 if (--domain->iommu_count == 0) {
1300 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1301 vm_domain_exit(domain);
1302 else
1303 domain_exit(domain);
1304 }
1305 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001306 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307 }
1308
1309 if (iommu->gcmd & DMA_GCMD_TE)
1310 iommu_disable_translation(iommu);
1311
1312 if (iommu->irq) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001313 irq_set_handler_data(iommu->irq, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001314 /* This will mask the irq */
1315 free_irq(iommu->irq, iommu);
1316 destroy_irq(iommu->irq);
1317 }
1318
1319 kfree(iommu->domains);
1320 kfree(iommu->domain_ids);
1321
Weidong Hand9630fe2008-12-08 11:06:32 +08001322 g_iommus[iommu->seq_id] = NULL;
1323
1324 /* if all iommus are freed, free g_iommus */
1325 for (i = 0; i < g_num_of_iommus; i++) {
1326 if (g_iommus[i])
1327 break;
1328 }
1329
1330 if (i == g_num_of_iommus)
1331 kfree(g_iommus);
1332
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001333 /* free context mapping */
1334 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001335}
1336
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001337static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001338{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001340
1341 domain = alloc_domain_mem();
1342 if (!domain)
1343 return NULL;
1344
Suresh Siddha4c923d42009-10-02 11:01:24 -07001345 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08001346 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001347 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348
1349 return domain;
1350}
1351
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001352static int iommu_attach_domain(struct dmar_domain *domain,
1353 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001354{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001355 int num;
1356 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001357 unsigned long flags;
1358
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001359 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001360
1361 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001362
1363 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1364 if (num >= ndomains) {
1365 spin_unlock_irqrestore(&iommu->lock, flags);
1366 printk(KERN_ERR "IOMMU: no free domain ids\n");
1367 return -ENOMEM;
1368 }
1369
1370 domain->id = num;
1371 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001372 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001373 iommu->domains[num] = domain;
1374 spin_unlock_irqrestore(&iommu->lock, flags);
1375
1376 return 0;
1377}
1378
1379static void iommu_detach_domain(struct dmar_domain *domain,
1380 struct intel_iommu *iommu)
1381{
1382 unsigned long flags;
1383 int num, ndomains;
1384 int found = 0;
1385
1386 spin_lock_irqsave(&iommu->lock, flags);
1387 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001388 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001389 if (iommu->domains[num] == domain) {
1390 found = 1;
1391 break;
1392 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001393 }
1394
1395 if (found) {
1396 clear_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001397 clear_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001398 iommu->domains[num] = NULL;
1399 }
Weidong Han8c11e792008-12-08 15:29:22 +08001400 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401}
1402
1403static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001404static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001405
Joseph Cihula51a63e62011-03-21 11:04:24 -07001406static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407{
1408 struct pci_dev *pdev = NULL;
1409 struct iova *iova;
1410 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411
David Millerf6611972008-02-06 01:36:23 -08001412 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413
Mark Gross8a443df2008-03-04 14:59:31 -08001414 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1415 &reserved_rbtree_key);
1416
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001417 /* IOAPIC ranges shouldn't be accessed by DMA */
1418 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1419 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001420 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001422 return -ENODEV;
1423 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424
1425 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1426 for_each_pci_dev(pdev) {
1427 struct resource *r;
1428
1429 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1430 r = &pdev->resource[i];
1431 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1432 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001433 iova = reserve_iova(&reserved_iova_list,
1434 IOVA_PFN(r->start),
1435 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001436 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001438 return -ENODEV;
1439 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440 }
1441 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001442 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443}
1444
1445static void domain_reserve_special_ranges(struct dmar_domain *domain)
1446{
1447 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1448}
1449
1450static inline int guestwidth_to_adjustwidth(int gaw)
1451{
1452 int agaw;
1453 int r = (gaw - 12) % 9;
1454
1455 if (r == 0)
1456 agaw = gaw;
1457 else
1458 agaw = gaw + 9 - r;
1459 if (agaw > 64)
1460 agaw = 64;
1461 return agaw;
1462}
1463
1464static int domain_init(struct dmar_domain *domain, int guest_width)
1465{
1466 struct intel_iommu *iommu;
1467 int adjust_width, agaw;
1468 unsigned long sagaw;
1469
David Millerf6611972008-02-06 01:36:23 -08001470 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001471 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001472
1473 domain_reserve_special_ranges(domain);
1474
1475 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001476 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477 if (guest_width > cap_mgaw(iommu->cap))
1478 guest_width = cap_mgaw(iommu->cap);
1479 domain->gaw = guest_width;
1480 adjust_width = guestwidth_to_adjustwidth(guest_width);
1481 agaw = width_to_agaw(adjust_width);
1482 sagaw = cap_sagaw(iommu->cap);
1483 if (!test_bit(agaw, &sagaw)) {
1484 /* hardware doesn't support it, choose a bigger one */
1485 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1486 agaw = find_next_bit(&sagaw, 5, agaw);
1487 if (agaw >= 5)
1488 return -ENODEV;
1489 }
1490 domain->agaw = agaw;
1491 INIT_LIST_HEAD(&domain->devices);
1492
Weidong Han8e6040972008-12-08 15:49:06 +08001493 if (ecap_coherent(iommu->ecap))
1494 domain->iommu_coherency = 1;
1495 else
1496 domain->iommu_coherency = 0;
1497
Sheng Yang58c610b2009-03-18 15:33:05 +08001498 if (ecap_sc_support(iommu->ecap))
1499 domain->iommu_snooping = 1;
1500 else
1501 domain->iommu_snooping = 0;
1502
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001503 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001504 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001505 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001506
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001507 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001508 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 if (!domain->pgd)
1510 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001511 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001512 return 0;
1513}
1514
1515static void domain_exit(struct dmar_domain *domain)
1516{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001517 struct dmar_drhd_unit *drhd;
1518 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519
1520 /* Domain 0 is reserved, so dont process it */
1521 if (!domain)
1522 return;
1523
Alex Williamson7b668352011-05-24 12:02:41 +01001524 /* Flush any lazy unmaps that may reference this domain */
1525 if (!intel_iommu_strict)
1526 flush_unmaps_timeout(0);
1527
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001528 domain_remove_dev_info(domain);
1529 /* destroy iovas */
1530 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531
1532 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001533 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001534
1535 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001536 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001537
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001538 for_each_active_iommu(iommu, drhd)
Mike Travis1b198bb2012-03-05 15:05:16 -08001539 if (test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001540 iommu_detach_domain(domain, iommu);
1541
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 free_domain_mem(domain);
1543}
1544
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001545static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1546 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001547{
1548 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001549 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001550 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001551 struct dma_pte *pgd;
1552 unsigned long num;
1553 unsigned long ndomains;
1554 int id;
1555 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001556 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001557
1558 pr_debug("Set context mapping for %02x:%02x.%d\n",
1559 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001560
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001561 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001562 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1563 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001564
David Woodhouse276dbf992009-04-04 01:45:37 +01001565 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001566 if (!iommu)
1567 return -ENODEV;
1568
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001569 context = device_to_context_entry(iommu, bus, devfn);
1570 if (!context)
1571 return -ENOMEM;
1572 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001573 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574 spin_unlock_irqrestore(&iommu->lock, flags);
1575 return 0;
1576 }
1577
Weidong Hanea6606b2008-12-08 23:08:15 +08001578 id = domain->id;
1579 pgd = domain->pgd;
1580
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001581 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1582 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001583 int found = 0;
1584
1585 /* find an available domain id for this device in iommu */
1586 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001587 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001588 if (iommu->domains[num] == domain) {
1589 id = num;
1590 found = 1;
1591 break;
1592 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001593 }
1594
1595 if (found == 0) {
1596 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1597 if (num >= ndomains) {
1598 spin_unlock_irqrestore(&iommu->lock, flags);
1599 printk(KERN_ERR "IOMMU: no free domain ids\n");
1600 return -EFAULT;
1601 }
1602
1603 set_bit(num, iommu->domain_ids);
1604 iommu->domains[num] = domain;
1605 id = num;
1606 }
1607
1608 /* Skip top levels of page tables for
1609 * iommu which has less agaw than default.
Chris Wright1672af12009-12-02 12:06:34 -08001610 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001611 */
Chris Wright1672af12009-12-02 12:06:34 -08001612 if (translation != CONTEXT_TT_PASS_THROUGH) {
1613 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1614 pgd = phys_to_virt(dma_pte_addr(pgd));
1615 if (!dma_pte_present(pgd)) {
1616 spin_unlock_irqrestore(&iommu->lock, flags);
1617 return -ENOMEM;
1618 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001619 }
1620 }
1621 }
1622
1623 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001624
Yu Zhao93a23a72009-05-18 13:51:37 +08001625 if (translation != CONTEXT_TT_PASS_THROUGH) {
1626 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1627 translation = info ? CONTEXT_TT_DEV_IOTLB :
1628 CONTEXT_TT_MULTI_LEVEL;
1629 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001630 /*
1631 * In pass through mode, AW must be programmed to indicate the largest
1632 * AGAW value supported by hardware. And ASR is ignored by hardware.
1633 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001634 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001635 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001636 else {
1637 context_set_address_root(context, virt_to_phys(pgd));
1638 context_set_address_width(context, iommu->agaw);
1639 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001640
1641 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001642 context_set_fault_enable(context);
1643 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001644 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001646 /*
1647 * It's a non-present to present mapping. If hardware doesn't cache
1648 * non-present entry we only need to flush the write-buffer. If the
1649 * _does_ cache non-present entries, then it does so in the special
1650 * domain #0, which we have to flush:
1651 */
1652 if (cap_caching_mode(iommu->cap)) {
1653 iommu->flush.flush_context(iommu, 0,
1654 (((u16)bus) << 8) | devfn,
1655 DMA_CCMD_MASK_NOBIT,
1656 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001657 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001658 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001660 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001661 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001662 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001663
1664 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001665 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001666 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001667 if (domain->iommu_count == 1)
1668 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001669 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001670 }
1671 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 return 0;
1673}
1674
1675static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001676domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1677 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678{
1679 int ret;
1680 struct pci_dev *tmp, *parent;
1681
David Woodhouse276dbf992009-04-04 01:45:37 +01001682 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001683 pdev->bus->number, pdev->devfn,
1684 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 if (ret)
1686 return ret;
1687
1688 /* dependent device mapping */
1689 tmp = pci_find_upstream_pcie_bridge(pdev);
1690 if (!tmp)
1691 return 0;
1692 /* Secondary interface's bus number and devfn 0 */
1693 parent = pdev->bus->self;
1694 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001695 ret = domain_context_mapping_one(domain,
1696 pci_domain_nr(parent->bus),
1697 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001698 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 if (ret)
1700 return ret;
1701 parent = parent->bus->self;
1702 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001703 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001705 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001706 tmp->subordinate->number, 0,
1707 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001708 else /* this is a legacy PCI bridge */
1709 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001710 pci_domain_nr(tmp->bus),
1711 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001712 tmp->devfn,
1713 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714}
1715
Weidong Han5331fe62008-12-08 23:00:00 +08001716static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717{
1718 int ret;
1719 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001720 struct intel_iommu *iommu;
1721
David Woodhouse276dbf992009-04-04 01:45:37 +01001722 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1723 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001724 if (!iommu)
1725 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001726
David Woodhouse276dbf992009-04-04 01:45:37 +01001727 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728 if (!ret)
1729 return ret;
1730 /* dependent device mapping */
1731 tmp = pci_find_upstream_pcie_bridge(pdev);
1732 if (!tmp)
1733 return ret;
1734 /* Secondary interface's bus number and devfn 0 */
1735 parent = pdev->bus->self;
1736 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001737 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001738 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739 if (!ret)
1740 return ret;
1741 parent = parent->bus->self;
1742 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001743 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001744 return device_context_mapped(iommu, tmp->subordinate->number,
1745 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001746 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001747 return device_context_mapped(iommu, tmp->bus->number,
1748 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001749}
1750
Fenghua Yuf5329592009-08-04 15:09:37 -07001751/* Returns a number of VTD pages, but aligned to MM page size */
1752static inline unsigned long aligned_nrpages(unsigned long host_addr,
1753 size_t size)
1754{
1755 host_addr &= ~PAGE_MASK;
1756 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1757}
1758
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001759/* Return largest possible superpage level for a given mapping */
1760static inline int hardware_largepage_caps(struct dmar_domain *domain,
1761 unsigned long iov_pfn,
1762 unsigned long phy_pfn,
1763 unsigned long pages)
1764{
1765 int support, level = 1;
1766 unsigned long pfnmerge;
1767
1768 support = domain->iommu_superpage;
1769
1770 /* To use a large page, the virtual *and* physical addresses
1771 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1772 of them will mean we have to use smaller pages. So just
1773 merge them and check both at once. */
1774 pfnmerge = iov_pfn | phy_pfn;
1775
1776 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1777 pages >>= VTD_STRIDE_SHIFT;
1778 if (!pages)
1779 break;
1780 pfnmerge >>= VTD_STRIDE_SHIFT;
1781 level++;
1782 support--;
1783 }
1784 return level;
1785}
1786
David Woodhouse9051aa02009-06-29 12:30:54 +01001787static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1788 struct scatterlist *sg, unsigned long phys_pfn,
1789 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001790{
1791 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001792 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001793 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001794 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001795 unsigned int largepage_lvl = 0;
1796 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001797
1798 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1799
1800 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1801 return -EINVAL;
1802
1803 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1804
David Woodhouse9051aa02009-06-29 12:30:54 +01001805 if (sg)
1806 sg_res = 0;
1807 else {
1808 sg_res = nr_pages + 1;
1809 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1810 }
1811
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001812 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001813 uint64_t tmp;
1814
David Woodhousee1605492009-06-29 11:17:38 +01001815 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001816 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001817 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1818 sg->dma_length = sg->length;
1819 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001820 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001821 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001822
David Woodhousee1605492009-06-29 11:17:38 +01001823 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001824 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1825
1826 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001827 if (!pte)
1828 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001829 /* It is large page*/
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001830 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001831 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001832 /* Ensure that old small page tables are removed to make room
1833 for superpage, if they exist. */
1834 dma_pte_clear_range(domain, iov_pfn,
1835 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1836 dma_pte_free_pagetable(domain, iov_pfn,
1837 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1838 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001839 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001840 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001841
David Woodhousee1605492009-06-29 11:17:38 +01001842 }
1843 /* We don't need lock here, nobody else
1844 * touches the iova range
1845 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001846 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001847 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001848 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001849 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1850 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001851 if (dumps) {
1852 dumps--;
1853 debug_dma_dump_mappings(NULL);
1854 }
1855 WARN_ON(1);
1856 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001857
1858 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1859
1860 BUG_ON(nr_pages < lvl_pages);
1861 BUG_ON(sg_res < lvl_pages);
1862
1863 nr_pages -= lvl_pages;
1864 iov_pfn += lvl_pages;
1865 phys_pfn += lvl_pages;
1866 pteval += lvl_pages * VTD_PAGE_SIZE;
1867 sg_res -= lvl_pages;
1868
1869 /* If the next PTE would be the first in a new page, then we
1870 need to flush the cache on the entries we've just written.
1871 And then we'll need to recalculate 'pte', so clear it and
1872 let it get set again in the if (!pte) block above.
1873
1874 If we're done (!nr_pages) we need to flush the cache too.
1875
1876 Also if we've been setting superpages, we may need to
1877 recalculate 'pte' and switch back to smaller pages for the
1878 end of the mapping, if the trailing size is not enough to
1879 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001880 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001881 if (!nr_pages || first_pte_in_page(pte) ||
1882 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001883 domain_flush_cache(domain, first_pte,
1884 (void *)pte - (void *)first_pte);
1885 pte = NULL;
1886 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001887
1888 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001889 sg = sg_next(sg);
1890 }
1891 return 0;
1892}
1893
David Woodhouse9051aa02009-06-29 12:30:54 +01001894static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1895 struct scatterlist *sg, unsigned long nr_pages,
1896 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001897{
David Woodhouse9051aa02009-06-29 12:30:54 +01001898 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1899}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001900
David Woodhouse9051aa02009-06-29 12:30:54 +01001901static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1902 unsigned long phys_pfn, unsigned long nr_pages,
1903 int prot)
1904{
1905 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906}
1907
Weidong Hanc7151a82008-12-08 22:51:37 +08001908static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909{
Weidong Hanc7151a82008-12-08 22:51:37 +08001910 if (!iommu)
1911 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001912
1913 clear_context_table(iommu, bus, devfn);
1914 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001915 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001916 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917}
1918
David Woodhouse109b9b02012-05-25 17:43:02 +01001919static inline void unlink_domain_info(struct device_domain_info *info)
1920{
1921 assert_spin_locked(&device_domain_lock);
1922 list_del(&info->link);
1923 list_del(&info->global);
1924 if (info->dev)
1925 info->dev->dev.archdata.iommu = NULL;
1926}
1927
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928static void domain_remove_dev_info(struct dmar_domain *domain)
1929{
1930 struct device_domain_info *info;
1931 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001932 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933
1934 spin_lock_irqsave(&device_domain_lock, flags);
1935 while (!list_empty(&domain->devices)) {
1936 info = list_entry(domain->devices.next,
1937 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001938 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001939 spin_unlock_irqrestore(&device_domain_lock, flags);
1940
Yu Zhao93a23a72009-05-18 13:51:37 +08001941 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001942 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001943 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944 free_devinfo_mem(info);
1945
1946 spin_lock_irqsave(&device_domain_lock, flags);
1947 }
1948 spin_unlock_irqrestore(&device_domain_lock, flags);
1949}
1950
1951/*
1952 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001953 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954 */
Kay, Allen M38717942008-09-09 18:37:29 +03001955static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001956find_domain(struct pci_dev *pdev)
1957{
1958 struct device_domain_info *info;
1959
1960 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001961 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001962 if (info)
1963 return info->domain;
1964 return NULL;
1965}
1966
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967/* domain is initialized */
1968static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1969{
1970 struct dmar_domain *domain, *found = NULL;
1971 struct intel_iommu *iommu;
1972 struct dmar_drhd_unit *drhd;
1973 struct device_domain_info *info, *tmp;
1974 struct pci_dev *dev_tmp;
1975 unsigned long flags;
1976 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001977 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001978 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979
1980 domain = find_domain(pdev);
1981 if (domain)
1982 return domain;
1983
David Woodhouse276dbf992009-04-04 01:45:37 +01001984 segment = pci_domain_nr(pdev->bus);
1985
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001986 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1987 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001988 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001989 bus = dev_tmp->subordinate->number;
1990 devfn = 0;
1991 } else {
1992 bus = dev_tmp->bus->number;
1993 devfn = dev_tmp->devfn;
1994 }
1995 spin_lock_irqsave(&device_domain_lock, flags);
1996 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001997 if (info->segment == segment &&
1998 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999 found = info->domain;
2000 break;
2001 }
2002 }
2003 spin_unlock_irqrestore(&device_domain_lock, flags);
2004 /* pcie-pci bridge already has a domain, uses it */
2005 if (found) {
2006 domain = found;
2007 goto found_domain;
2008 }
2009 }
2010
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002011 domain = alloc_domain();
2012 if (!domain)
2013 goto error;
2014
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002015 /* Allocate new domain for the device */
2016 drhd = dmar_find_matched_drhd_unit(pdev);
2017 if (!drhd) {
2018 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2019 pci_name(pdev));
Julia Lawalld2900bd2012-07-24 16:18:14 +02002020 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002021 return NULL;
2022 }
2023 iommu = drhd->iommu;
2024
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002025 ret = iommu_attach_domain(domain, iommu);
2026 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002027 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002028 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002029 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002030
2031 if (domain_init(domain, gaw)) {
2032 domain_exit(domain);
2033 goto error;
2034 }
2035
2036 /* register pcie-to-pci device */
2037 if (dev_tmp) {
2038 info = alloc_devinfo_mem();
2039 if (!info) {
2040 domain_exit(domain);
2041 goto error;
2042 }
David Woodhouse276dbf992009-04-04 01:45:37 +01002043 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002044 info->bus = bus;
2045 info->devfn = devfn;
2046 info->dev = NULL;
2047 info->domain = domain;
2048 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002049 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002050
2051 /* pcie-to-pci bridge already has a domain, uses it */
2052 found = NULL;
2053 spin_lock_irqsave(&device_domain_lock, flags);
2054 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002055 if (tmp->segment == segment &&
2056 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002057 found = tmp->domain;
2058 break;
2059 }
2060 }
2061 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002062 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002063 free_devinfo_mem(info);
2064 domain_exit(domain);
2065 domain = found;
2066 } else {
2067 list_add(&info->link, &domain->devices);
2068 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002069 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002070 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002071 }
2072
2073found_domain:
2074 info = alloc_devinfo_mem();
2075 if (!info)
2076 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002077 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002078 info->bus = pdev->bus->number;
2079 info->devfn = pdev->devfn;
2080 info->dev = pdev;
2081 info->domain = domain;
2082 spin_lock_irqsave(&device_domain_lock, flags);
2083 /* somebody is fast */
2084 found = find_domain(pdev);
2085 if (found != NULL) {
2086 spin_unlock_irqrestore(&device_domain_lock, flags);
2087 if (found != domain) {
2088 domain_exit(domain);
2089 domain = found;
2090 }
2091 free_devinfo_mem(info);
2092 return domain;
2093 }
2094 list_add(&info->link, &domain->devices);
2095 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002096 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 spin_unlock_irqrestore(&device_domain_lock, flags);
2098 return domain;
2099error:
2100 /* recheck it here, maybe others set it */
2101 return find_domain(pdev);
2102}
2103
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002104static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002105#define IDENTMAP_ALL 1
2106#define IDENTMAP_GFX 2
2107#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002108
David Woodhouseb2132032009-06-26 18:50:28 +01002109static int iommu_domain_identity_map(struct dmar_domain *domain,
2110 unsigned long long start,
2111 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112{
David Woodhousec5395d52009-06-28 16:35:56 +01002113 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2114 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002115
David Woodhousec5395d52009-06-28 16:35:56 +01002116 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2117 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002118 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002119 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002120 }
2121
David Woodhousec5395d52009-06-28 16:35:56 +01002122 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2123 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002124 /*
2125 * RMRR range might have overlap with physical memory range,
2126 * clear it first
2127 */
David Woodhousec5395d52009-06-28 16:35:56 +01002128 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002129
David Woodhousec5395d52009-06-28 16:35:56 +01002130 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2131 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002132 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002133}
2134
2135static int iommu_prepare_identity_map(struct pci_dev *pdev,
2136 unsigned long long start,
2137 unsigned long long end)
2138{
2139 struct dmar_domain *domain;
2140 int ret;
2141
David Woodhousec7ab48d2009-06-26 19:10:36 +01002142 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002143 if (!domain)
2144 return -ENOMEM;
2145
David Woodhouse19943b02009-08-04 16:19:20 +01002146 /* For _hardware_ passthrough, don't bother. But for software
2147 passthrough, we do it anyway -- it may indicate a memory
2148 range which is reserved in E820, so which didn't get set
2149 up to start with in si_domain */
2150 if (domain == si_domain && hw_pass_through) {
2151 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2152 pci_name(pdev), start, end);
2153 return 0;
2154 }
2155
2156 printk(KERN_INFO
2157 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2158 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002159
David Woodhouse5595b522009-12-02 09:21:55 +00002160 if (end < start) {
2161 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2162 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2163 dmi_get_system_info(DMI_BIOS_VENDOR),
2164 dmi_get_system_info(DMI_BIOS_VERSION),
2165 dmi_get_system_info(DMI_PRODUCT_VERSION));
2166 ret = -EIO;
2167 goto error;
2168 }
2169
David Woodhouse2ff729f2009-08-26 14:25:41 +01002170 if (end >> agaw_to_width(domain->agaw)) {
2171 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2172 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2173 agaw_to_width(domain->agaw),
2174 dmi_get_system_info(DMI_BIOS_VENDOR),
2175 dmi_get_system_info(DMI_BIOS_VERSION),
2176 dmi_get_system_info(DMI_PRODUCT_VERSION));
2177 ret = -EIO;
2178 goto error;
2179 }
David Woodhouse19943b02009-08-04 16:19:20 +01002180
David Woodhouseb2132032009-06-26 18:50:28 +01002181 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182 if (ret)
2183 goto error;
2184
2185 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002186 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002187 if (ret)
2188 goto error;
2189
2190 return 0;
2191
2192 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193 domain_exit(domain);
2194 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002195}
2196
2197static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2198 struct pci_dev *pdev)
2199{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002200 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002201 return 0;
2202 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002203 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002204}
2205
Suresh Siddhad3f13812011-08-23 17:05:25 -07002206#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002207static inline void iommu_prepare_isa(void)
2208{
2209 struct pci_dev *pdev;
2210 int ret;
2211
2212 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2213 if (!pdev)
2214 return;
2215
David Woodhousec7ab48d2009-06-26 19:10:36 +01002216 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002217 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002218
2219 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002220 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2221 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002222
2223}
2224#else
2225static inline void iommu_prepare_isa(void)
2226{
2227 return;
2228}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002229#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002230
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002231static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002232
Matt Kraai071e1372009-08-23 22:30:22 -07002233static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002234{
2235 struct dmar_drhd_unit *drhd;
2236 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002237 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002238
2239 si_domain = alloc_domain();
2240 if (!si_domain)
2241 return -EFAULT;
2242
David Woodhousec7ab48d2009-06-26 19:10:36 +01002243 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002244
2245 for_each_active_iommu(iommu, drhd) {
2246 ret = iommu_attach_domain(si_domain, iommu);
2247 if (ret) {
2248 domain_exit(si_domain);
2249 return -EFAULT;
2250 }
2251 }
2252
2253 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2254 domain_exit(si_domain);
2255 return -EFAULT;
2256 }
2257
2258 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2259
David Woodhouse19943b02009-08-04 16:19:20 +01002260 if (hw)
2261 return 0;
2262
David Woodhousec7ab48d2009-06-26 19:10:36 +01002263 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002264 unsigned long start_pfn, end_pfn;
2265 int i;
2266
2267 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2268 ret = iommu_domain_identity_map(si_domain,
2269 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2270 if (ret)
2271 return ret;
2272 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002273 }
2274
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002275 return 0;
2276}
2277
2278static void domain_remove_one_dev_info(struct dmar_domain *domain,
2279 struct pci_dev *pdev);
2280static int identity_mapping(struct pci_dev *pdev)
2281{
2282 struct device_domain_info *info;
2283
2284 if (likely(!iommu_identity_mapping))
2285 return 0;
2286
Mike Traviscb452a42011-05-28 13:15:03 -05002287 info = pdev->dev.archdata.iommu;
2288 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2289 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002290
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002291 return 0;
2292}
2293
2294static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002295 struct pci_dev *pdev,
2296 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002297{
2298 struct device_domain_info *info;
2299 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002300 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002301
2302 info = alloc_devinfo_mem();
2303 if (!info)
2304 return -ENOMEM;
2305
2306 info->segment = pci_domain_nr(pdev->bus);
2307 info->bus = pdev->bus->number;
2308 info->devfn = pdev->devfn;
2309 info->dev = pdev;
2310 info->domain = domain;
2311
2312 spin_lock_irqsave(&device_domain_lock, flags);
2313 list_add(&info->link, &domain->devices);
2314 list_add(&info->global, &device_domain_list);
2315 pdev->dev.archdata.iommu = info;
2316 spin_unlock_irqrestore(&device_domain_lock, flags);
2317
David Woodhousee2ad23d2012-05-25 17:42:54 +01002318 ret = domain_context_mapping(domain, pdev, translation);
2319 if (ret) {
2320 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002321 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002322 spin_unlock_irqrestore(&device_domain_lock, flags);
2323 free_devinfo_mem(info);
2324 return ret;
2325 }
2326
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002327 return 0;
2328}
2329
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002330static bool device_has_rmrr(struct pci_dev *dev)
2331{
2332 struct dmar_rmrr_unit *rmrr;
2333 int i;
2334
2335 for_each_rmrr_units(rmrr) {
2336 for (i = 0; i < rmrr->devices_cnt; i++) {
2337 /*
2338 * Return TRUE if this RMRR contains the device that
2339 * is passed in.
2340 */
2341 if (rmrr->devices[i] == dev)
2342 return true;
2343 }
2344 }
2345 return false;
2346}
2347
David Woodhouse6941af22009-07-04 18:24:27 +01002348static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2349{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002350
2351 /*
2352 * We want to prevent any device associated with an RMRR from
2353 * getting placed into the SI Domain. This is done because
2354 * problems exist when devices are moved in and out of domains
2355 * and their respective RMRR info is lost. We exempt USB devices
2356 * from this process due to their usage of RMRRs that are known
2357 * to not be needed after BIOS hand-off to OS.
2358 */
2359 if (device_has_rmrr(pdev) &&
2360 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2361 return 0;
2362
David Woodhousee0fc7e02009-09-30 09:12:17 -07002363 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2364 return 1;
2365
2366 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2367 return 1;
2368
2369 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2370 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002371
David Woodhouse3dfc8132009-07-04 19:11:08 +01002372 /*
2373 * We want to start off with all devices in the 1:1 domain, and
2374 * take them out later if we find they can't access all of memory.
2375 *
2376 * However, we can't do this for PCI devices behind bridges,
2377 * because all PCI devices behind the same bridge will end up
2378 * with the same source-id on their transactions.
2379 *
2380 * Practically speaking, we can't change things around for these
2381 * devices at run-time, because we can't be sure there'll be no
2382 * DMA transactions in flight for any of their siblings.
2383 *
2384 * So PCI devices (unless they're on the root bus) as well as
2385 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2386 * the 1:1 domain, just in _case_ one of their siblings turns out
2387 * not to be able to map all of memory.
2388 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002389 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002390 if (!pci_is_root_bus(pdev->bus))
2391 return 0;
2392 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2393 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002394 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002395 return 0;
2396
2397 /*
2398 * At boot time, we don't yet know if devices will be 64-bit capable.
2399 * Assume that they will -- if they turn out not to be, then we can
2400 * take them out of the 1:1 domain later.
2401 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002402 if (!startup) {
2403 /*
2404 * If the device's dma_mask is less than the system's memory
2405 * size then this is not a candidate for identity mapping.
2406 */
2407 u64 dma_mask = pdev->dma_mask;
2408
2409 if (pdev->dev.coherent_dma_mask &&
2410 pdev->dev.coherent_dma_mask < dma_mask)
2411 dma_mask = pdev->dev.coherent_dma_mask;
2412
2413 return dma_mask >= dma_get_required_mask(&pdev->dev);
2414 }
David Woodhouse6941af22009-07-04 18:24:27 +01002415
2416 return 1;
2417}
2418
Matt Kraai071e1372009-08-23 22:30:22 -07002419static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002420{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002421 struct pci_dev *pdev = NULL;
2422 int ret;
2423
David Woodhouse19943b02009-08-04 16:19:20 +01002424 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002425 if (ret)
2426 return -EFAULT;
2427
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002428 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002429 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002430 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002431 hw ? CONTEXT_TT_PASS_THROUGH :
2432 CONTEXT_TT_MULTI_LEVEL);
2433 if (ret) {
2434 /* device not associated with an iommu */
2435 if (ret == -ENODEV)
2436 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002437 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002438 }
2439 pr_info("IOMMU: %s identity mapping for device %s\n",
2440 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002441 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002442 }
2443
2444 return 0;
2445}
2446
Joseph Cihulab7792602011-05-03 00:08:37 -07002447static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002448{
2449 struct dmar_drhd_unit *drhd;
2450 struct dmar_rmrr_unit *rmrr;
2451 struct pci_dev *pdev;
2452 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002453 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002454
2455 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002456 * for each drhd
2457 * allocate root
2458 * initialize and program root entry to not present
2459 * endfor
2460 */
2461 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002462 /*
2463 * lock not needed as this is only incremented in the single
2464 * threaded kernel __init code path all other access are read
2465 * only
2466 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002467 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2468 g_num_of_iommus++;
2469 continue;
2470 }
2471 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2472 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002473 }
2474
Weidong Hand9630fe2008-12-08 11:06:32 +08002475 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2476 GFP_KERNEL);
2477 if (!g_iommus) {
2478 printk(KERN_ERR "Allocating global iommu array failed\n");
2479 ret = -ENOMEM;
2480 goto error;
2481 }
2482
mark gross80b20dd2008-04-18 13:53:58 -07002483 deferred_flush = kzalloc(g_num_of_iommus *
2484 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2485 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002486 ret = -ENOMEM;
2487 goto error;
2488 }
2489
mark gross5e0d2a62008-03-04 15:22:08 -08002490 for_each_drhd_unit(drhd) {
2491 if (drhd->ignored)
2492 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002493
2494 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002495 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002496
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002497 ret = iommu_init_domains(iommu);
2498 if (ret)
2499 goto error;
2500
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002501 /*
2502 * TBD:
2503 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002504 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002505 */
2506 ret = iommu_alloc_root_entry(iommu);
2507 if (ret) {
2508 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2509 goto error;
2510 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002511 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002512 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002513 }
2514
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002515 /*
2516 * Start from the sane iommu hardware state.
2517 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002518 for_each_drhd_unit(drhd) {
2519 if (drhd->ignored)
2520 continue;
2521
2522 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002523
2524 /*
2525 * If the queued invalidation is already initialized by us
2526 * (for example, while enabling interrupt-remapping) then
2527 * we got the things already rolling from a sane state.
2528 */
2529 if (iommu->qi)
2530 continue;
2531
2532 /*
2533 * Clear any previous faults.
2534 */
2535 dmar_fault(-1, iommu);
2536 /*
2537 * Disable queued invalidation if supported and already enabled
2538 * before OS handover.
2539 */
2540 dmar_disable_qi(iommu);
2541 }
2542
2543 for_each_drhd_unit(drhd) {
2544 if (drhd->ignored)
2545 continue;
2546
2547 iommu = drhd->iommu;
2548
Youquan Songa77b67d2008-10-16 16:31:56 -07002549 if (dmar_enable_qi(iommu)) {
2550 /*
2551 * Queued Invalidate not enabled, use Register Based
2552 * Invalidate
2553 */
2554 iommu->flush.flush_context = __iommu_flush_context;
2555 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002556 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002557 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002558 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002559 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002560 } else {
2561 iommu->flush.flush_context = qi_flush_context;
2562 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002563 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002564 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002565 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002566 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002567 }
2568 }
2569
David Woodhouse19943b02009-08-04 16:19:20 +01002570 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002571 iommu_identity_mapping |= IDENTMAP_ALL;
2572
Suresh Siddhad3f13812011-08-23 17:05:25 -07002573#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002574 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002575#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002576
2577 check_tylersburg_isoch();
2578
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002579 /*
2580 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002581 * identity mappings for rmrr, gfx, and isa and may fall back to static
2582 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002583 */
David Woodhouse19943b02009-08-04 16:19:20 +01002584 if (iommu_identity_mapping) {
2585 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2586 if (ret) {
2587 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2588 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002589 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002590 }
David Woodhouse19943b02009-08-04 16:19:20 +01002591 /*
2592 * For each rmrr
2593 * for each dev attached to rmrr
2594 * do
2595 * locate drhd for dev, alloc domain for dev
2596 * allocate free domain
2597 * allocate page table entries for rmrr
2598 * if context not allocated for bus
2599 * allocate and init context
2600 * set present in root table for this bus
2601 * init context with domain, translation etc
2602 * endfor
2603 * endfor
2604 */
2605 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2606 for_each_rmrr_units(rmrr) {
2607 for (i = 0; i < rmrr->devices_cnt; i++) {
2608 pdev = rmrr->devices[i];
2609 /*
2610 * some BIOS lists non-exist devices in DMAR
2611 * table.
2612 */
2613 if (!pdev)
2614 continue;
2615 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2616 if (ret)
2617 printk(KERN_ERR
2618 "IOMMU: mapping reserved region failed\n");
2619 }
2620 }
2621
2622 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002623
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002624 /*
2625 * for each drhd
2626 * enable fault log
2627 * global invalidate context cache
2628 * global invalidate iotlb
2629 * enable translation
2630 */
2631 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002632 if (drhd->ignored) {
2633 /*
2634 * we always have to disable PMRs or DMA may fail on
2635 * this device
2636 */
2637 if (force_on)
2638 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002639 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002640 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002641 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642
2643 iommu_flush_write_buffer(iommu);
2644
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002645 ret = dmar_set_interrupt(iommu);
2646 if (ret)
2647 goto error;
2648
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002649 iommu_set_root_entry(iommu);
2650
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002651 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002652 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002653
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002654 ret = iommu_enable_translation(iommu);
2655 if (ret)
2656 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002657
2658 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002659 }
2660
2661 return 0;
2662error:
2663 for_each_drhd_unit(drhd) {
2664 if (drhd->ignored)
2665 continue;
2666 iommu = drhd->iommu;
2667 free_iommu(iommu);
2668 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002669 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002670 return ret;
2671}
2672
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002673/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002674static struct iova *intel_alloc_iova(struct device *dev,
2675 struct dmar_domain *domain,
2676 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002677{
2678 struct pci_dev *pdev = to_pci_dev(dev);
2679 struct iova *iova = NULL;
2680
David Woodhouse875764d2009-06-28 21:20:51 +01002681 /* Restrict dma_mask to the width that the iommu can handle */
2682 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2683
2684 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002685 /*
2686 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002687 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002688 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002689 */
David Woodhouse875764d2009-06-28 21:20:51 +01002690 iova = alloc_iova(&domain->iovad, nrpages,
2691 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2692 if (iova)
2693 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002694 }
David Woodhouse875764d2009-06-28 21:20:51 +01002695 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2696 if (unlikely(!iova)) {
2697 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2698 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002699 return NULL;
2700 }
2701
2702 return iova;
2703}
2704
David Woodhouse147202a2009-07-07 19:43:20 +01002705static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002706{
2707 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002709
2710 domain = get_domain_for_dev(pdev,
2711 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2712 if (!domain) {
2713 printk(KERN_ERR
2714 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002715 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716 }
2717
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002719 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002720 ret = domain_context_mapping(domain, pdev,
2721 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002722 if (ret) {
2723 printk(KERN_ERR
2724 "Domain context map for %s failed",
2725 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002726 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002728 }
2729
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002730 return domain;
2731}
2732
David Woodhouse147202a2009-07-07 19:43:20 +01002733static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2734{
2735 struct device_domain_info *info;
2736
2737 /* No lock here, assumes no domain exit in normal case */
2738 info = dev->dev.archdata.iommu;
2739 if (likely(info))
2740 return info->domain;
2741
2742 return __get_valid_domain_for_dev(dev);
2743}
2744
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002745static int iommu_dummy(struct pci_dev *pdev)
2746{
2747 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2748}
2749
2750/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002751static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002752{
David Woodhouse73676832009-07-04 14:08:36 +01002753 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002754 int found;
2755
David Woodhouse73676832009-07-04 14:08:36 +01002756 if (unlikely(dev->bus != &pci_bus_type))
2757 return 1;
2758
2759 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002760 if (iommu_dummy(pdev))
2761 return 1;
2762
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002763 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002764 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765
2766 found = identity_mapping(pdev);
2767 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002768 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002769 return 1;
2770 else {
2771 /*
2772 * 32 bit DMA is removed from si_domain and fall back
2773 * to non-identity mapping.
2774 */
2775 domain_remove_one_dev_info(si_domain, pdev);
2776 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2777 pci_name(pdev));
2778 return 0;
2779 }
2780 } else {
2781 /*
2782 * In case of a detached 64 bit DMA device from vm, the device
2783 * is put into si_domain for identity mapping.
2784 */
David Woodhouse6941af22009-07-04 18:24:27 +01002785 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002786 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002787 ret = domain_add_dev_info(si_domain, pdev,
2788 hw_pass_through ?
2789 CONTEXT_TT_PASS_THROUGH :
2790 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002791 if (!ret) {
2792 printk(KERN_INFO "64bit %s uses identity mapping\n",
2793 pci_name(pdev));
2794 return 1;
2795 }
2796 }
2797 }
2798
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002799 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002800}
2801
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002802static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2803 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002804{
2805 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002806 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002807 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002808 struct iova *iova;
2809 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002810 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002811 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002812 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002813
2814 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002815
David Woodhouse73676832009-07-04 14:08:36 +01002816 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002817 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002818
2819 domain = get_valid_domain_for_dev(pdev);
2820 if (!domain)
2821 return 0;
2822
Weidong Han8c11e792008-12-08 15:29:22 +08002823 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002824 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002825
Mike Travisc681d0b2011-05-28 13:15:05 -05002826 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002827 if (!iova)
2828 goto error;
2829
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002830 /*
2831 * Check if DMAR supports zero-length reads on write only
2832 * mappings..
2833 */
2834 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002835 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002836 prot |= DMA_PTE_READ;
2837 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2838 prot |= DMA_PTE_WRITE;
2839 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002840 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002841 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002842 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002843 * is not a big problem
2844 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002845 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002846 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002847 if (ret)
2848 goto error;
2849
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002850 /* it's a non-present to present mapping. Only flush if caching mode */
2851 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002852 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002853 else
Weidong Han8c11e792008-12-08 15:29:22 +08002854 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002855
David Woodhouse03d6a242009-06-28 15:33:46 +01002856 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2857 start_paddr += paddr & ~PAGE_MASK;
2858 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002859
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002860error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002861 if (iova)
2862 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002863 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002864 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002865 return 0;
2866}
2867
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002868static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2869 unsigned long offset, size_t size,
2870 enum dma_data_direction dir,
2871 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002872{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002873 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2874 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002875}
2876
mark gross5e0d2a62008-03-04 15:22:08 -08002877static void flush_unmaps(void)
2878{
mark gross80b20dd2008-04-18 13:53:58 -07002879 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002880
mark gross5e0d2a62008-03-04 15:22:08 -08002881 timer_on = 0;
2882
2883 /* just flush them all */
2884 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002885 struct intel_iommu *iommu = g_iommus[i];
2886 if (!iommu)
2887 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002888
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002889 if (!deferred_flush[i].next)
2890 continue;
2891
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002892 /* In caching mode, global flushes turn emulation expensive */
2893 if (!cap_caching_mode(iommu->cap))
2894 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002895 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002896 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002897 unsigned long mask;
2898 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002899 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002900
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002901 /* On real hardware multiple invalidations are expensive */
2902 if (cap_caching_mode(iommu->cap))
2903 iommu_flush_iotlb_psi(iommu, domain->id,
2904 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2905 else {
2906 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2907 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2908 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2909 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002910 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002911 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002912 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002913 }
2914
mark gross5e0d2a62008-03-04 15:22:08 -08002915 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002916}
2917
2918static void flush_unmaps_timeout(unsigned long data)
2919{
mark gross80b20dd2008-04-18 13:53:58 -07002920 unsigned long flags;
2921
2922 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002923 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002924 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002925}
2926
2927static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2928{
2929 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002930 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002931 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002932
2933 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002934 if (list_size == HIGH_WATER_MARK)
2935 flush_unmaps();
2936
Weidong Han8c11e792008-12-08 15:29:22 +08002937 iommu = domain_get_iommu(dom);
2938 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002939
mark gross80b20dd2008-04-18 13:53:58 -07002940 next = deferred_flush[iommu_id].next;
2941 deferred_flush[iommu_id].domain[next] = dom;
2942 deferred_flush[iommu_id].iova[next] = iova;
2943 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002944
2945 if (!timer_on) {
2946 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2947 timer_on = 1;
2948 }
2949 list_size++;
2950 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2951}
2952
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002953static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2954 size_t size, enum dma_data_direction dir,
2955 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956{
2957 struct pci_dev *pdev = to_pci_dev(dev);
2958 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002959 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002960 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002961 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002962
David Woodhouse73676832009-07-04 14:08:36 +01002963 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002965
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002966 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002967 BUG_ON(!domain);
2968
Weidong Han8c11e792008-12-08 15:29:22 +08002969 iommu = domain_get_iommu(domain);
2970
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002971 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002972 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2973 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002974 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002975
David Woodhoused794dc92009-06-28 00:27:49 +01002976 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2977 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002978
David Woodhoused794dc92009-06-28 00:27:49 +01002979 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2980 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002981
2982 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002983 dma_pte_clear_range(domain, start_pfn, last_pfn);
2984
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002985 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002986 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2987
mark gross5e0d2a62008-03-04 15:22:08 -08002988 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002989 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002990 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002991 /* free iova */
2992 __free_iova(&domain->iovad, iova);
2993 } else {
2994 add_unmap(domain, iova);
2995 /*
2996 * queue up the release of the unmap to save the 1/6th of the
2997 * cpu used up by the iotlb flush operation...
2998 */
mark gross5e0d2a62008-03-04 15:22:08 -08002999 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003000}
3001
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003002static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003003 dma_addr_t *dma_handle, gfp_t flags,
3004 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003005{
3006 void *vaddr;
3007 int order;
3008
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003009 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003010 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003011
3012 if (!iommu_no_mapping(hwdev))
3013 flags &= ~(GFP_DMA | GFP_DMA32);
3014 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3015 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3016 flags |= GFP_DMA;
3017 else
3018 flags |= GFP_DMA32;
3019 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003020
3021 vaddr = (void *)__get_free_pages(flags, order);
3022 if (!vaddr)
3023 return NULL;
3024 memset(vaddr, 0, size);
3025
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003026 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3027 DMA_BIDIRECTIONAL,
3028 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003029 if (*dma_handle)
3030 return vaddr;
3031 free_pages((unsigned long)vaddr, order);
3032 return NULL;
3033}
3034
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003035static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003036 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003037{
3038 int order;
3039
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003040 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003041 order = get_order(size);
3042
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003043 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003044 free_pages((unsigned long)vaddr, order);
3045}
3046
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003047static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3048 int nelems, enum dma_data_direction dir,
3049 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003051 struct pci_dev *pdev = to_pci_dev(hwdev);
3052 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003053 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003054 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003055 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003056
David Woodhouse73676832009-07-04 14:08:36 +01003057 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003058 return;
3059
3060 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003061 BUG_ON(!domain);
3062
3063 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003064
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003065 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003066 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3067 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003068 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003069
David Woodhoused794dc92009-06-28 00:27:49 +01003070 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3071 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003072
3073 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003074 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003075
David Woodhoused794dc92009-06-28 00:27:49 +01003076 /* free page tables */
3077 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3078
David Woodhouseacea0012009-07-14 01:55:11 +01003079 if (intel_iommu_strict) {
3080 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003081 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003082 /* free iova */
3083 __free_iova(&domain->iovad, iova);
3084 } else {
3085 add_unmap(domain, iova);
3086 /*
3087 * queue up the release of the unmap to save the 1/6th of the
3088 * cpu used up by the iotlb flush operation...
3089 */
3090 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003091}
3092
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003093static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003094 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003095{
3096 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003097 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003098
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003099 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003100 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003101 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003102 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003103 }
3104 return nelems;
3105}
3106
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003107static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3108 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003109{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003110 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003111 struct pci_dev *pdev = to_pci_dev(hwdev);
3112 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003113 size_t size = 0;
3114 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003115 struct iova *iova = NULL;
3116 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003117 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003118 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003119 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003120
3121 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003122 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003123 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003124
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003125 domain = get_valid_domain_for_dev(pdev);
3126 if (!domain)
3127 return 0;
3128
Weidong Han8c11e792008-12-08 15:29:22 +08003129 iommu = domain_get_iommu(domain);
3130
David Woodhouseb536d242009-06-28 14:49:31 +01003131 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003132 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003133
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003134 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3135 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003136 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003137 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003138 return 0;
3139 }
3140
3141 /*
3142 * Check if DMAR supports zero-length reads on write only
3143 * mappings..
3144 */
3145 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003146 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003147 prot |= DMA_PTE_READ;
3148 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3149 prot |= DMA_PTE_WRITE;
3150
David Woodhouseb536d242009-06-28 14:49:31 +01003151 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003152
Fenghua Yuf5329592009-08-04 15:09:37 -07003153 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003154 if (unlikely(ret)) {
3155 /* clear the page */
3156 dma_pte_clear_range(domain, start_vpfn,
3157 start_vpfn + size - 1);
3158 /* free page tables */
3159 dma_pte_free_pagetable(domain, start_vpfn,
3160 start_vpfn + size - 1);
3161 /* free iova */
3162 __free_iova(&domain->iovad, iova);
3163 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003164 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003165
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003166 /* it's a non-present to present mapping. Only flush if caching mode */
3167 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003168 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003169 else
Weidong Han8c11e792008-12-08 15:29:22 +08003170 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003171
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003172 return nelems;
3173}
3174
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003175static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3176{
3177 return !dma_addr;
3178}
3179
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003180struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003181 .alloc = intel_alloc_coherent,
3182 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183 .map_sg = intel_map_sg,
3184 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003185 .map_page = intel_map_page,
3186 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003187 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188};
3189
3190static inline int iommu_domain_cache_init(void)
3191{
3192 int ret = 0;
3193
3194 iommu_domain_cache = kmem_cache_create("iommu_domain",
3195 sizeof(struct dmar_domain),
3196 0,
3197 SLAB_HWCACHE_ALIGN,
3198
3199 NULL);
3200 if (!iommu_domain_cache) {
3201 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3202 ret = -ENOMEM;
3203 }
3204
3205 return ret;
3206}
3207
3208static inline int iommu_devinfo_cache_init(void)
3209{
3210 int ret = 0;
3211
3212 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3213 sizeof(struct device_domain_info),
3214 0,
3215 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003216 NULL);
3217 if (!iommu_devinfo_cache) {
3218 printk(KERN_ERR "Couldn't create devinfo cache\n");
3219 ret = -ENOMEM;
3220 }
3221
3222 return ret;
3223}
3224
3225static inline int iommu_iova_cache_init(void)
3226{
3227 int ret = 0;
3228
3229 iommu_iova_cache = kmem_cache_create("iommu_iova",
3230 sizeof(struct iova),
3231 0,
3232 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003233 NULL);
3234 if (!iommu_iova_cache) {
3235 printk(KERN_ERR "Couldn't create iova cache\n");
3236 ret = -ENOMEM;
3237 }
3238
3239 return ret;
3240}
3241
3242static int __init iommu_init_mempool(void)
3243{
3244 int ret;
3245 ret = iommu_iova_cache_init();
3246 if (ret)
3247 return ret;
3248
3249 ret = iommu_domain_cache_init();
3250 if (ret)
3251 goto domain_error;
3252
3253 ret = iommu_devinfo_cache_init();
3254 if (!ret)
3255 return ret;
3256
3257 kmem_cache_destroy(iommu_domain_cache);
3258domain_error:
3259 kmem_cache_destroy(iommu_iova_cache);
3260
3261 return -ENOMEM;
3262}
3263
3264static void __init iommu_exit_mempool(void)
3265{
3266 kmem_cache_destroy(iommu_devinfo_cache);
3267 kmem_cache_destroy(iommu_domain_cache);
3268 kmem_cache_destroy(iommu_iova_cache);
3269
3270}
3271
Dan Williams556ab452010-07-23 15:47:56 -07003272static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3273{
3274 struct dmar_drhd_unit *drhd;
3275 u32 vtbar;
3276 int rc;
3277
3278 /* We know that this device on this chipset has its own IOMMU.
3279 * If we find it under a different IOMMU, then the BIOS is lying
3280 * to us. Hope that the IOMMU for this device is actually
3281 * disabled, and it needs no translation...
3282 */
3283 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3284 if (rc) {
3285 /* "can't" happen */
3286 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3287 return;
3288 }
3289 vtbar &= 0xffff0000;
3290
3291 /* we know that the this iommu should be at offset 0xa000 from vtbar */
3292 drhd = dmar_find_matched_drhd_unit(pdev);
3293 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3294 TAINT_FIRMWARE_WORKAROUND,
3295 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3296 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3297}
3298DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3299
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300static void __init init_no_remapping_devices(void)
3301{
3302 struct dmar_drhd_unit *drhd;
3303
3304 for_each_drhd_unit(drhd) {
3305 if (!drhd->include_all) {
3306 int i;
3307 for (i = 0; i < drhd->devices_cnt; i++)
3308 if (drhd->devices[i] != NULL)
3309 break;
3310 /* ignore DMAR unit if no pci devices exist */
3311 if (i == drhd->devices_cnt)
3312 drhd->ignored = 1;
3313 }
3314 }
3315
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316 for_each_drhd_unit(drhd) {
3317 int i;
3318 if (drhd->ignored || drhd->include_all)
3319 continue;
3320
3321 for (i = 0; i < drhd->devices_cnt; i++)
3322 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003323 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003324 break;
3325
3326 if (i < drhd->devices_cnt)
3327 continue;
3328
David Woodhousec0771df2011-10-14 20:59:46 +01003329 /* This IOMMU has *only* gfx devices. Either bypass it or
3330 set the gfx_mapped flag, as appropriate */
3331 if (dmar_map_gfx) {
3332 intel_iommu_gfx_mapped = 1;
3333 } else {
3334 drhd->ignored = 1;
3335 for (i = 0; i < drhd->devices_cnt; i++) {
3336 if (!drhd->devices[i])
3337 continue;
3338 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3339 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003340 }
3341 }
3342}
3343
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003344#ifdef CONFIG_SUSPEND
3345static int init_iommu_hw(void)
3346{
3347 struct dmar_drhd_unit *drhd;
3348 struct intel_iommu *iommu = NULL;
3349
3350 for_each_active_iommu(iommu, drhd)
3351 if (iommu->qi)
3352 dmar_reenable_qi(iommu);
3353
Joseph Cihulab7792602011-05-03 00:08:37 -07003354 for_each_iommu(iommu, drhd) {
3355 if (drhd->ignored) {
3356 /*
3357 * we always have to disable PMRs or DMA may fail on
3358 * this device
3359 */
3360 if (force_on)
3361 iommu_disable_protect_mem_regions(iommu);
3362 continue;
3363 }
3364
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003365 iommu_flush_write_buffer(iommu);
3366
3367 iommu_set_root_entry(iommu);
3368
3369 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003370 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003371 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003372 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003373 if (iommu_enable_translation(iommu))
3374 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003375 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003376 }
3377
3378 return 0;
3379}
3380
3381static void iommu_flush_all(void)
3382{
3383 struct dmar_drhd_unit *drhd;
3384 struct intel_iommu *iommu;
3385
3386 for_each_active_iommu(iommu, drhd) {
3387 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003388 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003389 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003390 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003391 }
3392}
3393
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003394static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003395{
3396 struct dmar_drhd_unit *drhd;
3397 struct intel_iommu *iommu = NULL;
3398 unsigned long flag;
3399
3400 for_each_active_iommu(iommu, drhd) {
3401 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3402 GFP_ATOMIC);
3403 if (!iommu->iommu_state)
3404 goto nomem;
3405 }
3406
3407 iommu_flush_all();
3408
3409 for_each_active_iommu(iommu, drhd) {
3410 iommu_disable_translation(iommu);
3411
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003412 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003413
3414 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3415 readl(iommu->reg + DMAR_FECTL_REG);
3416 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3417 readl(iommu->reg + DMAR_FEDATA_REG);
3418 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3419 readl(iommu->reg + DMAR_FEADDR_REG);
3420 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3421 readl(iommu->reg + DMAR_FEUADDR_REG);
3422
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003423 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003424 }
3425 return 0;
3426
3427nomem:
3428 for_each_active_iommu(iommu, drhd)
3429 kfree(iommu->iommu_state);
3430
3431 return -ENOMEM;
3432}
3433
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003434static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003435{
3436 struct dmar_drhd_unit *drhd;
3437 struct intel_iommu *iommu = NULL;
3438 unsigned long flag;
3439
3440 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003441 if (force_on)
3442 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3443 else
3444 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003445 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003446 }
3447
3448 for_each_active_iommu(iommu, drhd) {
3449
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003450 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003451
3452 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3453 iommu->reg + DMAR_FECTL_REG);
3454 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3455 iommu->reg + DMAR_FEDATA_REG);
3456 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3457 iommu->reg + DMAR_FEADDR_REG);
3458 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3459 iommu->reg + DMAR_FEUADDR_REG);
3460
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003461 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003462 }
3463
3464 for_each_active_iommu(iommu, drhd)
3465 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003466}
3467
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003468static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003469 .resume = iommu_resume,
3470 .suspend = iommu_suspend,
3471};
3472
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003473static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003474{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003475 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003476}
3477
3478#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003479static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003480#endif /* CONFIG_PM */
3481
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003482LIST_HEAD(dmar_rmrr_units);
3483
3484static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3485{
3486 list_add(&rmrr->list, &dmar_rmrr_units);
3487}
3488
3489
3490int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3491{
3492 struct acpi_dmar_reserved_memory *rmrr;
3493 struct dmar_rmrr_unit *rmrru;
3494
3495 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3496 if (!rmrru)
3497 return -ENOMEM;
3498
3499 rmrru->hdr = header;
3500 rmrr = (struct acpi_dmar_reserved_memory *)header;
3501 rmrru->base_address = rmrr->base_address;
3502 rmrru->end_address = rmrr->end_address;
3503
3504 dmar_register_rmrr_unit(rmrru);
3505 return 0;
3506}
3507
3508static int __init
3509rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3510{
3511 struct acpi_dmar_reserved_memory *rmrr;
3512 int ret;
3513
3514 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3515 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3516 ((void *)rmrr) + rmrr->header.length,
3517 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3518
3519 if (ret || (rmrru->devices_cnt == 0)) {
3520 list_del(&rmrru->list);
3521 kfree(rmrru);
3522 }
3523 return ret;
3524}
3525
3526static LIST_HEAD(dmar_atsr_units);
3527
3528int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3529{
3530 struct acpi_dmar_atsr *atsr;
3531 struct dmar_atsr_unit *atsru;
3532
3533 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3534 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3535 if (!atsru)
3536 return -ENOMEM;
3537
3538 atsru->hdr = hdr;
3539 atsru->include_all = atsr->flags & 0x1;
3540
3541 list_add(&atsru->list, &dmar_atsr_units);
3542
3543 return 0;
3544}
3545
3546static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3547{
3548 int rc;
3549 struct acpi_dmar_atsr *atsr;
3550
3551 if (atsru->include_all)
3552 return 0;
3553
3554 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3555 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3556 (void *)atsr + atsr->header.length,
3557 &atsru->devices_cnt, &atsru->devices,
3558 atsr->segment);
3559 if (rc || !atsru->devices_cnt) {
3560 list_del(&atsru->list);
3561 kfree(atsru);
3562 }
3563
3564 return rc;
3565}
3566
3567int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3568{
3569 int i;
3570 struct pci_bus *bus;
3571 struct acpi_dmar_atsr *atsr;
3572 struct dmar_atsr_unit *atsru;
3573
3574 dev = pci_physfn(dev);
3575
3576 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3577 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3578 if (atsr->segment == pci_domain_nr(dev->bus))
3579 goto found;
3580 }
3581
3582 return 0;
3583
3584found:
3585 for (bus = dev->bus; bus; bus = bus->parent) {
3586 struct pci_dev *bridge = bus->self;
3587
3588 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003589 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003590 return 0;
3591
Yijing Wang62f87c02012-07-24 17:20:03 +08003592 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003593 for (i = 0; i < atsru->devices_cnt; i++)
3594 if (atsru->devices[i] == bridge)
3595 return 1;
3596 break;
3597 }
3598 }
3599
3600 if (atsru->include_all)
3601 return 1;
3602
3603 return 0;
3604}
3605
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003606int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003607{
3608 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3609 struct dmar_atsr_unit *atsr, *atsr_n;
3610 int ret = 0;
3611
3612 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3613 ret = rmrr_parse_dev(rmrr);
3614 if (ret)
3615 return ret;
3616 }
3617
3618 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3619 ret = atsr_parse_dev(atsr);
3620 if (ret)
3621 return ret;
3622 }
3623
3624 return ret;
3625}
3626
Fenghua Yu99dcade2009-11-11 07:23:06 -08003627/*
3628 * Here we only respond to action of unbound device from driver.
3629 *
3630 * Added device is not attached to its DMAR domain here yet. That will happen
3631 * when mapping the device to iova.
3632 */
3633static int device_notifier(struct notifier_block *nb,
3634 unsigned long action, void *data)
3635{
3636 struct device *dev = data;
3637 struct pci_dev *pdev = to_pci_dev(dev);
3638 struct dmar_domain *domain;
3639
David Woodhouse44cd6132009-12-02 10:18:30 +00003640 if (iommu_no_mapping(dev))
3641 return 0;
3642
Fenghua Yu99dcade2009-11-11 07:23:06 -08003643 domain = find_domain(pdev);
3644 if (!domain)
3645 return 0;
3646
Alex Williamsona97590e2011-03-04 14:52:16 -07003647 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003648 domain_remove_one_dev_info(domain, pdev);
3649
Alex Williamsona97590e2011-03-04 14:52:16 -07003650 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3651 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3652 list_empty(&domain->devices))
3653 domain_exit(domain);
3654 }
3655
Fenghua Yu99dcade2009-11-11 07:23:06 -08003656 return 0;
3657}
3658
3659static struct notifier_block device_nb = {
3660 .notifier_call = device_notifier,
3661};
3662
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003663int __init intel_iommu_init(void)
3664{
3665 int ret = 0;
3666
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003667 /* VT-d is required for a TXT/tboot launch, so enforce that */
3668 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003669
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003670 if (dmar_table_init()) {
3671 if (force_on)
3672 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003673 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003674 }
3675
Suresh Siddhac2c72862011-08-23 17:05:19 -07003676 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003677 if (force_on)
3678 panic("tboot: Failed to initialize DMAR device scope\n");
3679 return -ENODEV;
3680 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003681
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003682 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003683 return -ENODEV;
3684
Joseph Cihula51a63e62011-03-21 11:04:24 -07003685 if (iommu_init_mempool()) {
3686 if (force_on)
3687 panic("tboot: Failed to initialize iommu memory\n");
3688 return -ENODEV;
3689 }
3690
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003691 if (list_empty(&dmar_rmrr_units))
3692 printk(KERN_INFO "DMAR: No RMRR found\n");
3693
3694 if (list_empty(&dmar_atsr_units))
3695 printk(KERN_INFO "DMAR: No ATSR found\n");
3696
Joseph Cihula51a63e62011-03-21 11:04:24 -07003697 if (dmar_init_reserved_ranges()) {
3698 if (force_on)
3699 panic("tboot: Failed to reserve iommu ranges\n");
3700 return -ENODEV;
3701 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003702
3703 init_no_remapping_devices();
3704
Joseph Cihulab7792602011-05-03 00:08:37 -07003705 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003706 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003707 if (force_on)
3708 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003709 printk(KERN_ERR "IOMMU: dmar init failed\n");
3710 put_iova_domain(&reserved_iova_list);
3711 iommu_exit_mempool();
3712 return ret;
3713 }
3714 printk(KERN_INFO
3715 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3716
mark gross5e0d2a62008-03-04 15:22:08 -08003717 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003718#ifdef CONFIG_SWIOTLB
3719 swiotlb = 0;
3720#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003721 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003722
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003723 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003724
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003725 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003726
Fenghua Yu99dcade2009-11-11 07:23:06 -08003727 bus_register_notifier(&pci_bus_type, &device_nb);
3728
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003729 intel_iommu_enabled = 1;
3730
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003731 return 0;
3732}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003733
Han, Weidong3199aa62009-02-26 17:31:12 +08003734static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3735 struct pci_dev *pdev)
3736{
3737 struct pci_dev *tmp, *parent;
3738
3739 if (!iommu || !pdev)
3740 return;
3741
3742 /* dependent device detach */
3743 tmp = pci_find_upstream_pcie_bridge(pdev);
3744 /* Secondary interface's bus number and devfn 0 */
3745 if (tmp) {
3746 parent = pdev->bus->self;
3747 while (parent != tmp) {
3748 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003749 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003750 parent = parent->bus->self;
3751 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003752 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003753 iommu_detach_dev(iommu,
3754 tmp->subordinate->number, 0);
3755 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003756 iommu_detach_dev(iommu, tmp->bus->number,
3757 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003758 }
3759}
3760
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003761static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003762 struct pci_dev *pdev)
3763{
3764 struct device_domain_info *info;
3765 struct intel_iommu *iommu;
3766 unsigned long flags;
3767 int found = 0;
3768 struct list_head *entry, *tmp;
3769
David Woodhouse276dbf992009-04-04 01:45:37 +01003770 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3771 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003772 if (!iommu)
3773 return;
3774
3775 spin_lock_irqsave(&device_domain_lock, flags);
3776 list_for_each_safe(entry, tmp, &domain->devices) {
3777 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003778 if (info->segment == pci_domain_nr(pdev->bus) &&
3779 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003780 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003781 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003782 spin_unlock_irqrestore(&device_domain_lock, flags);
3783
Yu Zhao93a23a72009-05-18 13:51:37 +08003784 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003785 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003786 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003787 free_devinfo_mem(info);
3788
3789 spin_lock_irqsave(&device_domain_lock, flags);
3790
3791 if (found)
3792 break;
3793 else
3794 continue;
3795 }
3796
3797 /* if there is no other devices under the same iommu
3798 * owned by this domain, clear this iommu in iommu_bmp
3799 * update iommu count and coherency
3800 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003801 if (iommu == device_to_iommu(info->segment, info->bus,
3802 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003803 found = 1;
3804 }
3805
Roland Dreier3e7abe22011-07-20 06:22:21 -07003806 spin_unlock_irqrestore(&device_domain_lock, flags);
3807
Weidong Hanc7151a82008-12-08 22:51:37 +08003808 if (found == 0) {
3809 unsigned long tmp_flags;
3810 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003811 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003812 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003813 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003814 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003815
Alex Williamson9b4554b2011-05-24 12:19:04 -04003816 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3817 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3818 spin_lock_irqsave(&iommu->lock, tmp_flags);
3819 clear_bit(domain->id, iommu->domain_ids);
3820 iommu->domains[domain->id] = NULL;
3821 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3822 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003823 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003824}
3825
3826static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3827{
3828 struct device_domain_info *info;
3829 struct intel_iommu *iommu;
3830 unsigned long flags1, flags2;
3831
3832 spin_lock_irqsave(&device_domain_lock, flags1);
3833 while (!list_empty(&domain->devices)) {
3834 info = list_entry(domain->devices.next,
3835 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01003836 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003837 spin_unlock_irqrestore(&device_domain_lock, flags1);
3838
Yu Zhao93a23a72009-05-18 13:51:37 +08003839 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003840 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003841 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003842 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003843
3844 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003845 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003846 */
3847 spin_lock_irqsave(&domain->iommu_lock, flags2);
3848 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003849 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003850 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003851 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003852 }
3853 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3854
3855 free_devinfo_mem(info);
3856 spin_lock_irqsave(&device_domain_lock, flags1);
3857 }
3858 spin_unlock_irqrestore(&device_domain_lock, flags1);
3859}
3860
Weidong Han5e98c4b2008-12-08 23:03:27 +08003861/* domain id for virtual machine, it won't be set in context */
3862static unsigned long vm_domid;
3863
3864static struct dmar_domain *iommu_alloc_vm_domain(void)
3865{
3866 struct dmar_domain *domain;
3867
3868 domain = alloc_domain_mem();
3869 if (!domain)
3870 return NULL;
3871
3872 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003873 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003874 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003875 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3876
3877 return domain;
3878}
3879
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003880static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003881{
3882 int adjust_width;
3883
3884 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003885 spin_lock_init(&domain->iommu_lock);
3886
3887 domain_reserve_special_ranges(domain);
3888
3889 /* calculate AGAW */
3890 domain->gaw = guest_width;
3891 adjust_width = guestwidth_to_adjustwidth(guest_width);
3892 domain->agaw = width_to_agaw(adjust_width);
3893
3894 INIT_LIST_HEAD(&domain->devices);
3895
3896 domain->iommu_count = 0;
3897 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003898 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003899 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003900 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003901 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003902
3903 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003904 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003905 if (!domain->pgd)
3906 return -ENOMEM;
3907 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3908 return 0;
3909}
3910
3911static void iommu_free_vm_domain(struct dmar_domain *domain)
3912{
3913 unsigned long flags;
3914 struct dmar_drhd_unit *drhd;
3915 struct intel_iommu *iommu;
3916 unsigned long i;
3917 unsigned long ndomains;
3918
3919 for_each_drhd_unit(drhd) {
3920 if (drhd->ignored)
3921 continue;
3922 iommu = drhd->iommu;
3923
3924 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003925 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003926 if (iommu->domains[i] == domain) {
3927 spin_lock_irqsave(&iommu->lock, flags);
3928 clear_bit(i, iommu->domain_ids);
3929 iommu->domains[i] = NULL;
3930 spin_unlock_irqrestore(&iommu->lock, flags);
3931 break;
3932 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003933 }
3934 }
3935}
3936
3937static void vm_domain_exit(struct dmar_domain *domain)
3938{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003939 /* Domain 0 is reserved, so dont process it */
3940 if (!domain)
3941 return;
3942
3943 vm_domain_remove_all_dev_info(domain);
3944 /* destroy iovas */
3945 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003946
3947 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003948 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003949
3950 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003951 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003952
3953 iommu_free_vm_domain(domain);
3954 free_domain_mem(domain);
3955}
3956
Joerg Roedel5d450802008-12-03 14:52:32 +01003957static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003958{
Joerg Roedel5d450802008-12-03 14:52:32 +01003959 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003960
Joerg Roedel5d450802008-12-03 14:52:32 +01003961 dmar_domain = iommu_alloc_vm_domain();
3962 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003963 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003964 "intel_iommu_domain_init: dmar_domain == NULL\n");
3965 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003966 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003967 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003968 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003969 "intel_iommu_domain_init() failed\n");
3970 vm_domain_exit(dmar_domain);
3971 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003972 }
Allen Kay8140a952011-10-14 12:32:17 -07003973 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003974 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003975
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003976 domain->geometry.aperture_start = 0;
3977 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3978 domain->geometry.force_aperture = true;
3979
Joerg Roedel5d450802008-12-03 14:52:32 +01003980 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003981}
Kay, Allen M38717942008-09-09 18:37:29 +03003982
Joerg Roedel5d450802008-12-03 14:52:32 +01003983static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003984{
Joerg Roedel5d450802008-12-03 14:52:32 +01003985 struct dmar_domain *dmar_domain = domain->priv;
3986
3987 domain->priv = NULL;
3988 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003989}
Kay, Allen M38717942008-09-09 18:37:29 +03003990
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003991static int intel_iommu_attach_device(struct iommu_domain *domain,
3992 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003993{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003994 struct dmar_domain *dmar_domain = domain->priv;
3995 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003996 struct intel_iommu *iommu;
3997 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003998
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003999 /* normally pdev is not mapped */
4000 if (unlikely(domain_context_mapped(pdev))) {
4001 struct dmar_domain *old_domain;
4002
4003 old_domain = find_domain(pdev);
4004 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004005 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4006 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4007 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004008 else
4009 domain_remove_dev_info(old_domain);
4010 }
4011 }
4012
David Woodhouse276dbf992009-04-04 01:45:37 +01004013 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
4014 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004015 if (!iommu)
4016 return -ENODEV;
4017
4018 /* check if this iommu agaw is sufficient for max mapped address */
4019 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004020 if (addr_width > cap_mgaw(iommu->cap))
4021 addr_width = cap_mgaw(iommu->cap);
4022
4023 if (dmar_domain->max_addr > (1LL << addr_width)) {
4024 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004025 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004026 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004027 return -EFAULT;
4028 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004029 dmar_domain->gaw = addr_width;
4030
4031 /*
4032 * Knock out extra levels of page tables if necessary
4033 */
4034 while (iommu->agaw < dmar_domain->agaw) {
4035 struct dma_pte *pte;
4036
4037 pte = dmar_domain->pgd;
4038 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004039 dmar_domain->pgd = (struct dma_pte *)
4040 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004041 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004042 }
4043 dmar_domain->agaw--;
4044 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004045
David Woodhouse5fe60f42009-08-09 10:53:41 +01004046 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004047}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004048
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004049static void intel_iommu_detach_device(struct iommu_domain *domain,
4050 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004051{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004052 struct dmar_domain *dmar_domain = domain->priv;
4053 struct pci_dev *pdev = to_pci_dev(dev);
4054
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004055 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03004056}
Kay, Allen M38717942008-09-09 18:37:29 +03004057
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004058static int intel_iommu_map(struct iommu_domain *domain,
4059 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004060 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004061{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004062 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004063 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004064 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004065 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004066
Joerg Roedeldde57a22008-12-03 15:04:09 +01004067 if (iommu_prot & IOMMU_READ)
4068 prot |= DMA_PTE_READ;
4069 if (iommu_prot & IOMMU_WRITE)
4070 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004071 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4072 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004073
David Woodhouse163cc522009-06-28 00:51:17 +01004074 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004075 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004076 u64 end;
4077
4078 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004079 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004080 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004081 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004082 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004083 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004084 return -EFAULT;
4085 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004086 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004087 }
David Woodhousead051222009-06-28 14:22:28 +01004088 /* Round up size to next multiple of PAGE_SIZE, if it and
4089 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004090 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004091 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4092 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004093 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004094}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004095
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004096static size_t intel_iommu_unmap(struct iommu_domain *domain,
4097 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004098{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004099 struct dmar_domain *dmar_domain = domain->priv;
Allen Kay292827c2011-10-14 12:31:54 -07004100 int order;
Sheng Yang4b99d352009-07-08 11:52:52 +01004101
Allen Kay292827c2011-10-14 12:31:54 -07004102 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
David Woodhouse163cc522009-06-28 00:51:17 +01004103 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004104
David Woodhouse163cc522009-06-28 00:51:17 +01004105 if (dmar_domain->max_addr == iova + size)
4106 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004107
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004108 return PAGE_SIZE << order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004109}
Kay, Allen M38717942008-09-09 18:37:29 +03004110
Joerg Roedeld14d6572008-12-03 15:06:57 +01004111static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4112 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004113{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004114 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004115 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004116 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004117
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004118 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
Kay, Allen M38717942008-09-09 18:37:29 +03004119 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004120 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004121
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004122 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004123}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004124
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004125static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4126 unsigned long cap)
4127{
4128 struct dmar_domain *dmar_domain = domain->priv;
4129
4130 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4131 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004132 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004133 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004134
4135 return 0;
4136}
4137
Alex Williamson783f1572012-05-30 14:19:43 -06004138static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
4139{
4140 pci_dev_put(*from);
4141 *from = to;
4142}
4143
4144#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4145
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004146static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004147{
4148 struct pci_dev *pdev = to_pci_dev(dev);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004149 struct pci_dev *bridge, *dma_pdev = NULL;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004150 struct iommu_group *group;
4151 int ret;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004152
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004153 if (!device_to_iommu(pci_domain_nr(pdev->bus),
4154 pdev->bus->number, pdev->devfn))
Alex Williamson70ae6f02011-10-21 15:56:11 -04004155 return -ENODEV;
4156
4157 bridge = pci_find_upstream_pcie_bridge(pdev);
4158 if (bridge) {
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004159 if (pci_is_pcie(bridge))
4160 dma_pdev = pci_get_domain_bus_and_slot(
4161 pci_domain_nr(pdev->bus),
4162 bridge->subordinate->number, 0);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004163 if (!dma_pdev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004164 dma_pdev = pci_dev_get(bridge);
4165 } else
4166 dma_pdev = pci_dev_get(pdev);
4167
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004168 /* Account for quirked devices */
Alex Williamson783f1572012-05-30 14:19:43 -06004169 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4170
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004171 /*
4172 * If it's a multifunction device that does not support our
4173 * required ACS flags, add to the same group as function 0.
4174 */
Alex Williamson783f1572012-05-30 14:19:43 -06004175 if (dma_pdev->multifunction &&
4176 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
4177 swap_pci_ref(&dma_pdev,
4178 pci_get_slot(dma_pdev->bus,
4179 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
4180 0)));
4181
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004182 /*
4183 * Devices on the root bus go through the iommu. If that's not us,
4184 * find the next upstream device and test ACS up to the root bus.
4185 * Finding the next device may require skipping virtual buses.
4186 */
Alex Williamson783f1572012-05-30 14:19:43 -06004187 while (!pci_is_root_bus(dma_pdev->bus)) {
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004188 struct pci_bus *bus = dma_pdev->bus;
4189
4190 while (!bus->self) {
4191 if (!pci_is_root_bus(bus))
4192 bus = bus->parent;
4193 else
4194 goto root_bus;
4195 }
4196
4197 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
Alex Williamson783f1572012-05-30 14:19:43 -06004198 break;
4199
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004200 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
Alex Williamson70ae6f02011-10-21 15:56:11 -04004201 }
4202
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004203root_bus:
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004204 group = iommu_group_get(&dma_pdev->dev);
4205 pci_dev_put(dma_pdev);
4206 if (!group) {
4207 group = iommu_group_alloc();
4208 if (IS_ERR(group))
4209 return PTR_ERR(group);
4210 }
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004211
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004212 ret = iommu_group_add_device(group, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004213
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004214 iommu_group_put(group);
4215 return ret;
4216}
4217
4218static void intel_iommu_remove_device(struct device *dev)
4219{
4220 iommu_group_remove_device(dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004221}
4222
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004223static struct iommu_ops intel_iommu_ops = {
4224 .domain_init = intel_iommu_domain_init,
4225 .domain_destroy = intel_iommu_domain_destroy,
4226 .attach_dev = intel_iommu_attach_device,
4227 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004228 .map = intel_iommu_map,
4229 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004230 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004231 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004232 .add_device = intel_iommu_add_device,
4233 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004234 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004235};
David Woodhouse9af88142009-02-13 23:18:03 +00004236
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004237static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004238{
4239 /*
4240 * Mobile 4 Series Chipset neglects to set RWBF capability,
4241 * but needs it:
4242 */
4243 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4244 rwbf_quirk = 1;
David Woodhouse2d9e6672010-06-15 10:57:57 +01004245
4246 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4247 if (dev->revision == 0x07) {
4248 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4249 dmar_map_gfx = 0;
4250 }
David Woodhouse9af88142009-02-13 23:18:03 +00004251}
4252
4253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004254
Adam Jacksoneecfd572010-08-25 21:17:34 +01004255#define GGC 0x52
4256#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4257#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4258#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4259#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4260#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4261#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4262#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4263#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4264
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004265static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004266{
4267 unsigned short ggc;
4268
Adam Jacksoneecfd572010-08-25 21:17:34 +01004269 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004270 return;
4271
Adam Jacksoneecfd572010-08-25 21:17:34 +01004272 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004273 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4274 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004275 } else if (dmar_map_gfx) {
4276 /* we have to ensure the gfx device is idle before we flush */
4277 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4278 intel_iommu_strict = 1;
4279 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004280}
4281DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4282DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4283DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4284DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4285
David Woodhousee0fc7e02009-09-30 09:12:17 -07004286/* On Tylersburg chipsets, some BIOSes have been known to enable the
4287 ISOCH DMAR unit for the Azalia sound device, but not give it any
4288 TLB entries, which causes it to deadlock. Check for that. We do
4289 this in a function called from init_dmars(), instead of in a PCI
4290 quirk, because we don't want to print the obnoxious "BIOS broken"
4291 message if VT-d is actually disabled.
4292*/
4293static void __init check_tylersburg_isoch(void)
4294{
4295 struct pci_dev *pdev;
4296 uint32_t vtisochctrl;
4297
4298 /* If there's no Azalia in the system anyway, forget it. */
4299 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4300 if (!pdev)
4301 return;
4302 pci_dev_put(pdev);
4303
4304 /* System Management Registers. Might be hidden, in which case
4305 we can't do the sanity check. But that's OK, because the
4306 known-broken BIOSes _don't_ actually hide it, so far. */
4307 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4308 if (!pdev)
4309 return;
4310
4311 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4312 pci_dev_put(pdev);
4313 return;
4314 }
4315
4316 pci_dev_put(pdev);
4317
4318 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4319 if (vtisochctrl & 1)
4320 return;
4321
4322 /* Drop all bits other than the number of TLB entries */
4323 vtisochctrl &= 0x1c;
4324
4325 /* If we have the recommended number of TLB entries (16), fine. */
4326 if (vtisochctrl == 0x10)
4327 return;
4328
4329 /* Zero TLB entries? You get to ride the short bus to school. */
4330 if (!vtisochctrl) {
4331 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4332 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4333 dmi_get_system_info(DMI_BIOS_VENDOR),
4334 dmi_get_system_info(DMI_BIOS_VERSION),
4335 dmi_get_system_info(DMI_PRODUCT_VERSION));
4336 iommu_identity_mapping |= IDENTMAP_AZALIA;
4337 return;
4338 }
4339
4340 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4341 vtisochctrl);
4342}