/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			    ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
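
/*
 * Example (from the definitions above): with LEVEL_STRIDE == 9, agaw 2
 * corresponds to agaw_to_width(2) == 30 + 2 * 9 == 48 bits of address
 * and agaw_to_level(2) == 4 page-table levels, and width_to_agaw(48)
 * maps back to 2.  Each AGAW step adds one 9-bit level on top of the
 * 30-bit, two-level base.
 */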

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
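
/*
 * Illustration: VT-d always uses 4KiB pages (VTD_PAGE_SHIFT == 12), so
 * with 4KiB kernel pages the shifts above are by zero and DMA PFNs equal
 * MM PFNs; with larger kernel pages one MM PFN spans several consecutive
 * DMA PFNs.
 */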

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}
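
/*
 * Illustration (assuming 4KiB VT-d pages): the root table is one page of
 * ROOT_ENTRY_NR == 4096 / 16 == 256 root entries, indexed by PCI bus
 * number; each present entry points to a context table of 256 context
 * entries, indexed by devfn, as used by device_to_context_entry() below.
 */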

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
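
/*
 * Sketch of how the helpers above are typically combined when a device is
 * attached to a domain (illustrative only, not an additional code path):
 * clear the entry, set the address root to the domain's page-table root,
 * set the address width to the domain's agaw and the domain id, then set
 * the present bit and flush the CPU cache if the IOMMU is not coherent.
 */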

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
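
/*
 * Example PTE value, assuming the usual DMA_PTE_READ/DMA_PTE_WRITE bit
 * values of 1 and 2 from linux/intel-iommu.h: a readable, writable 4KiB
 * mapping of host physical page 0x12345 is
 * (0x12345ULL << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE, i.e.
 * 0x12345003; dma_pte_addr() masks the low bits back off.
 */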

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
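
/*
 * Example use of the boot parameter parsed above (options are comma
 * separated):
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables DMA remapping, disables batched IOTLB flushing and disables
 * super page support.
 */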

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
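
/*
 * Example: with DEFAULT_DOMAIN_ADDRESS_WIDTH == 48 the search starts at
 * width_to_agaw(48) == 2 (a 4-level table) and walks down to smaller agaw
 * values until one is set in the unit's SAGAW capability field, returning
 * -1 if none is supported.
 */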

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	int i, mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	domain->iommu_superpage = 4; /* 1TiB */

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		mask &= cap_super_page_val(g_iommus[i]->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}
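
/*
 * Illustration, assuming the intent is to keep only the superpage sizes
 * that every IOMMU in the domain supports: per the level comment in
 * struct dmar_domain, bit 0 of cap_super_page_val() stands for 2MiB and
 * bit 1 for 1GiB, so units reporting 0x1 and 0x3 leave a common mask of
 * 0x1 and fls() selects level 1 (2MiB); a mask of 0 means 4KiB only.
 */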

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int large_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset, target_level;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	/* Search pte */
	if (!large_level)
		target_level = 1;
	else
		target_level = large_level;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
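
/*
 * Walk illustration for the function above: for a domain with agaw 2
 * (48-bit, 4 levels) and large_level == 0, each iteration peels off 9
 * bits of the pfn via pfn_level_offset(), allocates missing directory
 * pages with cmpxchg64() so concurrent mappers agree on a single page,
 * and stops at level 1 to return the 4KiB leaf PTE for that pfn.
 */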


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
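
/*
 * Example: clearing DMA pfns 0x1000-0x17ff visits the mapped leaf PTEs
 * page by page; where a 2MiB superpage PTE is found, start_pfn advances
 * by lvl_to_nr_pages(2) == 512 pfns per entry, and each batch of cleared
 * PTEs is flushed from the CPU cache (for non-coherent IOMMUs) before
 * moving on.
 */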

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
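
/*
 * Register-level example (illustrative only): a page-selective flush of
 * 16 pages at DMA address 0x100000 is issued as
 * __iommu_flush_iotlb(iommu, did, 0x100000, 4, DMA_TLB_PSI_FLUSH);
 * the size_order of 4 encodes 2^4 pages and the address must be aligned
 * to that size, matching the "naturally aligned" note in
 * iommu_flush_iotlb_psi() below.
 */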

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
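
/*
 * Example of the mask computation above: flushing pages == 9 rounds up to
 * 16 == 2^4, so mask == 4 and one PSI invalidation covers the 16 aligned
 * pages; if mask exceeded cap_max_amask_val() the code would fall back to
 * a domain-selective flush instead.
 */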

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001216 nlongs = BITS_TO_LONGS(ndomains);
1217
Donald Dutile94a91b52009-08-20 16:51:34 -04001218 spin_lock_init(&iommu->lock);
1219
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001220 /* TBD: there might be 64K domains,
1221 * consider other allocation for future chip
1222 */
1223 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1224 if (!iommu->domain_ids) {
1225 printk(KERN_ERR "Allocating domain id array failed\n");
1226 return -ENOMEM;
1227 }
1228 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1229 GFP_KERNEL);
1230 if (!iommu->domains) {
1231 printk(KERN_ERR "Allocating domain array failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001232 return -ENOMEM;
1233 }
1234
1235 /*
1236 * if Caching mode is set, then invalid translations are tagged
1237 * with domainid 0. Hence we need to pre-allocate it.
1238 */
1239 if (cap_caching_mode(iommu->cap))
1240 set_bit(0, iommu->domain_ids);
1241 return 0;
1242}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001243
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001244
1245static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001246static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001247
1248void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001249{
1250 struct dmar_domain *domain;
1251 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001252 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001253
Donald Dutile94a91b52009-08-20 16:51:34 -04001254 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001255 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001256 domain = iommu->domains[i];
1257 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001258
Donald Dutile94a91b52009-08-20 16:51:34 -04001259 spin_lock_irqsave(&domain->iommu_lock, flags);
1260 if (--domain->iommu_count == 0) {
1261 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1262 vm_domain_exit(domain);
1263 else
1264 domain_exit(domain);
1265 }
1266 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001267 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001268 }
1269
1270 if (iommu->gcmd & DMA_GCMD_TE)
1271 iommu_disable_translation(iommu);
1272
1273 if (iommu->irq) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001274 irq_set_handler_data(iommu->irq, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001275 /* This will mask the irq */
1276 free_irq(iommu->irq, iommu);
1277 destroy_irq(iommu->irq);
1278 }
1279
1280 kfree(iommu->domains);
1281 kfree(iommu->domain_ids);
1282
Weidong Hand9630fe2008-12-08 11:06:32 +08001283 g_iommus[iommu->seq_id] = NULL;
1284
1285 /* if all iommus are freed, free g_iommus */
1286 for (i = 0; i < g_num_of_iommus; i++) {
1287 if (g_iommus[i])
1288 break;
1289 }
1290
1291 if (i == g_num_of_iommus)
1292 kfree(g_iommus);
1293
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001294 /* free context mapping */
1295 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001296}
1297
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001298static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001299{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001301
1302 domain = alloc_domain_mem();
1303 if (!domain)
1304 return NULL;
1305
Suresh Siddha4c923d42009-10-02 11:01:24 -07001306 domain->nid = -1;
Weidong Han8c11e792008-12-08 15:29:22 +08001307 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
Weidong Hand71a2f32008-12-07 21:13:41 +08001308 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001309
1310 return domain;
1311}
1312
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001313static int iommu_attach_domain(struct dmar_domain *domain,
1314 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001315{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001316 int num;
1317 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001318 unsigned long flags;
1319
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001320 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001321
1322 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001323
1324 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1325 if (num >= ndomains) {
1326 spin_unlock_irqrestore(&iommu->lock, flags);
1327 printk(KERN_ERR "IOMMU: no free domain ids\n");
1328 return -ENOMEM;
1329 }
1330
1331 domain->id = num;
1332 set_bit(num, iommu->domain_ids);
1333 set_bit(iommu->seq_id, &domain->iommu_bmp);
1334 iommu->domains[num] = domain;
1335 spin_unlock_irqrestore(&iommu->lock, flags);
1336
1337 return 0;
1338}
1339
1340static void iommu_detach_domain(struct dmar_domain *domain,
1341 struct intel_iommu *iommu)
1342{
1343 unsigned long flags;
1344 int num, ndomains;
1345 int found = 0;
1346
1347 spin_lock_irqsave(&iommu->lock, flags);
1348 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001349 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001350 if (iommu->domains[num] == domain) {
1351 found = 1;
1352 break;
1353 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001354 }
1355
1356 if (found) {
1357 clear_bit(num, iommu->domain_ids);
1358 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1359 iommu->domains[num] = NULL;
1360 }
Weidong Han8c11e792008-12-08 15:29:22 +08001361 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001362}
1363
1364static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001365static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001366
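/*
 * Reserve address ranges that must never be handed out as IOVAs: the
 * IOAPIC window and every PCI MMIO BAR (to keep DMA away from device
 * registers).  The result is copied into each new domain by
 * domain_reserve_special_ranges().
 */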
Joseph Cihula51a63e62011-03-21 11:04:24 -07001367static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001368{
1369 struct pci_dev *pdev = NULL;
1370 struct iova *iova;
1371 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001372
David Millerf6611972008-02-06 01:36:23 -08001373 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001374
Mark Gross8a443df2008-03-04 14:59:31 -08001375 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1376 &reserved_rbtree_key);
1377
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001378 /* IOAPIC ranges shouldn't be accessed by DMA */
1379 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1380 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001381 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001383 return -ENODEV;
1384 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385
1386 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1387 for_each_pci_dev(pdev) {
1388 struct resource *r;
1389
1390 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1391 r = &pdev->resource[i];
1392 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1393 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001394 iova = reserve_iova(&reserved_iova_list,
1395 IOVA_PFN(r->start),
1396 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001397 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001398 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001399 return -ENODEV;
1400 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401 }
1402 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001403 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001404}
1405
1406static void domain_reserve_special_ranges(struct dmar_domain *domain)
1407{
1408 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1409}
1410
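/*
 * Round a guest address width up to a width the page-table walk can
 * represent: 12 bits of page offset plus a whole number of 9-bit levels.
 * E.g. 39 stays 39 (3 levels), 40 rounds up to 48 (4 levels).
 */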
1411static inline int guestwidth_to_adjustwidth(int gaw)
1412{
1413 int agaw;
1414 int r = (gaw - 12) % 9;
1415
1416 if (r == 0)
1417 agaw = gaw;
1418 else
1419 agaw = gaw + 9 - r;
1420 if (agaw > 64)
1421 agaw = 64;
1422 return agaw;
1423}
1424
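/*
 * Second-stage domain setup: create the iova allocator, pick an AGAW the
 * hardware supports, cache the coherency/snooping/superpage capabilities
 * and allocate the top-level page directory.
 */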
1425static int domain_init(struct dmar_domain *domain, int guest_width)
1426{
1427 struct intel_iommu *iommu;
1428 int adjust_width, agaw;
1429 unsigned long sagaw;
1430
David Millerf6611972008-02-06 01:36:23 -08001431 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001432 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001433
1434 domain_reserve_special_ranges(domain);
1435
1436 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001437 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001438 if (guest_width > cap_mgaw(iommu->cap))
1439 guest_width = cap_mgaw(iommu->cap);
1440 domain->gaw = guest_width;
1441 adjust_width = guestwidth_to_adjustwidth(guest_width);
1442 agaw = width_to_agaw(adjust_width);
1443 sagaw = cap_sagaw(iommu->cap);
1444 if (!test_bit(agaw, &sagaw)) {
1445 /* hardware doesn't support it, choose a bigger one */
1446 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1447 agaw = find_next_bit(&sagaw, 5, agaw);
1448 if (agaw >= 5)
1449 return -ENODEV;
1450 }
1451 domain->agaw = agaw;
1452 INIT_LIST_HEAD(&domain->devices);
1453
Weidong Han8e6040972008-12-08 15:49:06 +08001454 if (ecap_coherent(iommu->ecap))
1455 domain->iommu_coherency = 1;
1456 else
1457 domain->iommu_coherency = 0;
1458
Sheng Yang58c610b2009-03-18 15:33:05 +08001459 if (ecap_sc_support(iommu->ecap))
1460 domain->iommu_snooping = 1;
1461 else
1462 domain->iommu_snooping = 0;
1463
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001464 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001465 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001466 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001467
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001468 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001469 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001470 if (!domain->pgd)
1471 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001472 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001473 return 0;
1474}
1475
1476static void domain_exit(struct dmar_domain *domain)
1477{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001478 struct dmar_drhd_unit *drhd;
1479 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001480
1481 /* Domain 0 is reserved, so dont process it */
1482 if (!domain)
1483 return;
1484
Alex Williamson7b668352011-05-24 12:02:41 +01001485 /* Flush any lazy unmaps that may reference this domain */
1486 if (!intel_iommu_strict)
1487 flush_unmaps_timeout(0);
1488
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001489 domain_remove_dev_info(domain);
1490 /* destroy iovas */
1491 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001492
1493 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001494 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495
1496 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001497 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001499 for_each_active_iommu(iommu, drhd)
1500 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1501 iommu_detach_domain(domain, iommu);
1502
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503 free_domain_mem(domain);
1504}
1505
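/*
 * Program the context entry for (segment, bus, devfn) so that its DMA
 * requests are translated through this domain.  For VM and static-identity
 * domains a per-IOMMU domain id is found or allocated; the flush at the
 * end depends on whether the hardware caches non-present entries.
 */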
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001506static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1507 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001508{
1509 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001510 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001511 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001512 struct dma_pte *pgd;
1513 unsigned long num;
1514 unsigned long ndomains;
1515 int id;
1516 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001517 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001518
1519 pr_debug("Set context mapping for %02x:%02x.%d\n",
1520 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001521
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001523 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1524 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001525
David Woodhouse276dbf992009-04-04 01:45:37 +01001526 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001527 if (!iommu)
1528 return -ENODEV;
1529
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001530 context = device_to_context_entry(iommu, bus, devfn);
1531 if (!context)
1532 return -ENOMEM;
1533 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001534 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001535 spin_unlock_irqrestore(&iommu->lock, flags);
1536 return 0;
1537 }
1538
Weidong Hanea6606b2008-12-08 23:08:15 +08001539 id = domain->id;
1540 pgd = domain->pgd;
1541
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001542 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1543 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001544 int found = 0;
1545
1546 /* find an available domain id for this device in iommu */
1547 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001548 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001549 if (iommu->domains[num] == domain) {
1550 id = num;
1551 found = 1;
1552 break;
1553 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001554 }
1555
1556 if (found == 0) {
1557 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1558 if (num >= ndomains) {
1559 spin_unlock_irqrestore(&iommu->lock, flags);
1560 printk(KERN_ERR "IOMMU: no free domain ids\n");
1561 return -EFAULT;
1562 }
1563
1564 set_bit(num, iommu->domain_ids);
1565 iommu->domains[num] = domain;
1566 id = num;
1567 }
1568
 1569 /* Skip top levels of page tables for
 1570 * an IOMMU which has less agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001571 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001572 */
Chris Wright1672af12009-12-02 12:06:34 -08001573 if (translation != CONTEXT_TT_PASS_THROUGH) {
1574 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1575 pgd = phys_to_virt(dma_pte_addr(pgd));
1576 if (!dma_pte_present(pgd)) {
1577 spin_unlock_irqrestore(&iommu->lock, flags);
1578 return -ENOMEM;
1579 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001580 }
1581 }
1582 }
1583
1584 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001585
Yu Zhao93a23a72009-05-18 13:51:37 +08001586 if (translation != CONTEXT_TT_PASS_THROUGH) {
1587 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1588 translation = info ? CONTEXT_TT_DEV_IOTLB :
1589 CONTEXT_TT_MULTI_LEVEL;
1590 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001591 /*
1592 * In pass through mode, AW must be programmed to indicate the largest
1593 * AGAW value supported by hardware. And ASR is ignored by hardware.
1594 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001595 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001596 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001597 else {
1598 context_set_address_root(context, virt_to_phys(pgd));
1599 context_set_address_width(context, iommu->agaw);
1600 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001601
1602 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001603 context_set_fault_enable(context);
1604 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001605 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001606
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001607 /*
1608 * It's a non-present to present mapping. If hardware doesn't cache
 1609 * non-present entries we only need to flush the write-buffer. If it
 1610 * _does_ cache non-present entries, then it does so in the special
1611 * domain #0, which we have to flush:
1612 */
1613 if (cap_caching_mode(iommu->cap)) {
1614 iommu->flush.flush_context(iommu, 0,
1615 (((u16)bus) << 8) | devfn,
1616 DMA_CCMD_MASK_NOBIT,
1617 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001618 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001619 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001621 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001622 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001623 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001624
1625 spin_lock_irqsave(&domain->iommu_lock, flags);
1626 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1627 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001628 if (domain->iommu_count == 1)
1629 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001630 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001631 }
1632 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001633 return 0;
1634}
1635
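/*
 * Map the device itself, then walk up through any PCIe-to-PCI bridges so
 * that transactions carrying a bridge's source-id are translated by the
 * same domain.
 */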
1636static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001637domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1638 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001639{
1640 int ret;
1641 struct pci_dev *tmp, *parent;
1642
David Woodhouse276dbf992009-04-04 01:45:37 +01001643 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001644 pdev->bus->number, pdev->devfn,
1645 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646 if (ret)
1647 return ret;
1648
1649 /* dependent device mapping */
1650 tmp = pci_find_upstream_pcie_bridge(pdev);
1651 if (!tmp)
1652 return 0;
1653 /* Secondary interface's bus number and devfn 0 */
1654 parent = pdev->bus->self;
1655 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001656 ret = domain_context_mapping_one(domain,
1657 pci_domain_nr(parent->bus),
1658 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001659 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660 if (ret)
1661 return ret;
1662 parent = parent->bus->self;
1663 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001664 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001666 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001667 tmp->subordinate->number, 0,
1668 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001669 else /* this is a legacy PCI bridge */
1670 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001671 pci_domain_nr(tmp->bus),
1672 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001673 tmp->devfn,
1674 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001675}
1676
Weidong Han5331fe62008-12-08 23:00:00 +08001677static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678{
1679 int ret;
1680 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001681 struct intel_iommu *iommu;
1682
David Woodhouse276dbf992009-04-04 01:45:37 +01001683 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1684 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001685 if (!iommu)
1686 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001687
David Woodhouse276dbf992009-04-04 01:45:37 +01001688 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689 if (!ret)
1690 return ret;
1691 /* dependent device mapping */
1692 tmp = pci_find_upstream_pcie_bridge(pdev);
1693 if (!tmp)
1694 return ret;
1695 /* Secondary interface's bus number and devfn 0 */
1696 parent = pdev->bus->self;
1697 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001698 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001699 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001700 if (!ret)
1701 return ret;
1702 parent = parent->bus->self;
1703 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001704 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001705 return device_context_mapped(iommu, tmp->subordinate->number,
1706 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001707 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001708 return device_context_mapped(iommu, tmp->bus->number,
1709 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001710}
1711
Fenghua Yuf5329592009-08-04 15:09:37 -07001712/* Returns a number of VTD pages, but aligned to MM page size */
1713static inline unsigned long aligned_nrpages(unsigned long host_addr,
1714 size_t size)
1715{
1716 host_addr &= ~PAGE_MASK;
1717 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1718}
1719
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001720/* Return largest possible superpage level for a given mapping */
1721static inline int hardware_largepage_caps(struct dmar_domain *domain,
1722 unsigned long iov_pfn,
1723 unsigned long phy_pfn,
1724 unsigned long pages)
1725{
1726 int support, level = 1;
1727 unsigned long pfnmerge;
1728
1729 support = domain->iommu_superpage;
1730
1731 /* To use a large page, the virtual *and* physical addresses
1732 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1733 of them will mean we have to use smaller pages. So just
1734 merge them and check both at once. */
1735 pfnmerge = iov_pfn | phy_pfn;
1736
1737 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1738 pages >>= VTD_STRIDE_SHIFT;
1739 if (!pages)
1740 break;
1741 pfnmerge >>= VTD_STRIDE_SHIFT;
1742 level++;
1743 support--;
1744 }
1745 return level;
1746}
1747
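/*
 * Core mapping routine: install PTEs for either a scatterlist (sg) or a
 * contiguous pfn range (phys_pfn), using the largest superpage size that
 * both the alignment and the remaining length allow, and flushing the CPU
 * cache for each page of PTEs that gets written.
 */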
David Woodhouse9051aa02009-06-29 12:30:54 +01001748static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1749 struct scatterlist *sg, unsigned long phys_pfn,
1750 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001751{
1752 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001753 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001754 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001755 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001756 unsigned int largepage_lvl = 0;
1757 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001758
1759 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1760
1761 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1762 return -EINVAL;
1763
1764 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1765
David Woodhouse9051aa02009-06-29 12:30:54 +01001766 if (sg)
1767 sg_res = 0;
1768 else {
1769 sg_res = nr_pages + 1;
1770 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1771 }
1772
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001773 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001774 uint64_t tmp;
1775
David Woodhousee1605492009-06-29 11:17:38 +01001776 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001777 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001778 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1779 sg->dma_length = sg->length;
1780 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001781 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001782 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001783
David Woodhousee1605492009-06-29 11:17:38 +01001784 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001785 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1786
1787 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001788 if (!pte)
1789 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001790 /* It is a large page */
1791 if (largepage_lvl > 1)
1792 pteval |= DMA_PTE_LARGE_PAGE;
1793 else
1794 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1795
David Woodhousee1605492009-06-29 11:17:38 +01001796 }
 1797 /* We don't need a lock here; nobody else
1798 * touches the iova range
1799 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001800 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001801 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001802 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001803 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1804 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001805 if (dumps) {
1806 dumps--;
1807 debug_dma_dump_mappings(NULL);
1808 }
1809 WARN_ON(1);
1810 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001811
1812 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1813
1814 BUG_ON(nr_pages < lvl_pages);
1815 BUG_ON(sg_res < lvl_pages);
1816
1817 nr_pages -= lvl_pages;
1818 iov_pfn += lvl_pages;
1819 phys_pfn += lvl_pages;
1820 pteval += lvl_pages * VTD_PAGE_SIZE;
1821 sg_res -= lvl_pages;
1822
1823 /* If the next PTE would be the first in a new page, then we
1824 need to flush the cache on the entries we've just written.
1825 And then we'll need to recalculate 'pte', so clear it and
1826 let it get set again in the if (!pte) block above.
1827
1828 If we're done (!nr_pages) we need to flush the cache too.
1829
1830 Also if we've been setting superpages, we may need to
1831 recalculate 'pte' and switch back to smaller pages for the
1832 end of the mapping, if the trailing size is not enough to
1833 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001834 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001835 if (!nr_pages || first_pte_in_page(pte) ||
1836 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001837 domain_flush_cache(domain, first_pte,
1838 (void *)pte - (void *)first_pte);
1839 pte = NULL;
1840 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001841
1842 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001843 sg = sg_next(sg);
1844 }
1845 return 0;
1846}
1847
David Woodhouse9051aa02009-06-29 12:30:54 +01001848static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1849 struct scatterlist *sg, unsigned long nr_pages,
1850 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001851{
David Woodhouse9051aa02009-06-29 12:30:54 +01001852 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1853}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001854
David Woodhouse9051aa02009-06-29 12:30:54 +01001855static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1856 unsigned long phys_pfn, unsigned long nr_pages,
1857 int prot)
1858{
1859 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001860}
1861
Weidong Hanc7151a82008-12-08 22:51:37 +08001862static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863{
Weidong Hanc7151a82008-12-08 22:51:37 +08001864 if (!iommu)
1865 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001866
1867 clear_context_table(iommu, bus, devfn);
1868 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001869 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001870 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871}
1872
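/*
 * Detach every device from the domain: unlink its device_domain_info,
 * clear dev.archdata.iommu and tear down the context entry on the
 * device's IOMMU.
 */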
1873static void domain_remove_dev_info(struct dmar_domain *domain)
1874{
1875 struct device_domain_info *info;
1876 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001877 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878
1879 spin_lock_irqsave(&device_domain_lock, flags);
1880 while (!list_empty(&domain->devices)) {
1881 info = list_entry(domain->devices.next,
1882 struct device_domain_info, link);
1883 list_del(&info->link);
1884 list_del(&info->global);
1885 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001886 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001887 spin_unlock_irqrestore(&device_domain_lock, flags);
1888
Yu Zhao93a23a72009-05-18 13:51:37 +08001889 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001890 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001891 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001892 free_devinfo_mem(info);
1893
1894 spin_lock_irqsave(&device_domain_lock, flags);
1895 }
1896 spin_unlock_irqrestore(&device_domain_lock, flags);
1897}
1898
1899/*
1900 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001901 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902 */
Kay, Allen M38717942008-09-09 18:37:29 +03001903static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001904find_domain(struct pci_dev *pdev)
1905{
1906 struct device_domain_info *info;
1907
1908 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001909 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910 if (info)
1911 return info->domain;
1912 return NULL;
1913}
1914
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001915/* Return an initialized domain for the device, allocating one if necessary */
1916static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1917{
1918 struct dmar_domain *domain, *found = NULL;
1919 struct intel_iommu *iommu;
1920 struct dmar_drhd_unit *drhd;
1921 struct device_domain_info *info, *tmp;
1922 struct pci_dev *dev_tmp;
1923 unsigned long flags;
1924 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001925 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001926 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001927
1928 domain = find_domain(pdev);
1929 if (domain)
1930 return domain;
1931
David Woodhouse276dbf992009-04-04 01:45:37 +01001932 segment = pci_domain_nr(pdev->bus);
1933
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001934 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1935 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001936 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001937 bus = dev_tmp->subordinate->number;
1938 devfn = 0;
1939 } else {
1940 bus = dev_tmp->bus->number;
1941 devfn = dev_tmp->devfn;
1942 }
1943 spin_lock_irqsave(&device_domain_lock, flags);
1944 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001945 if (info->segment == segment &&
1946 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947 found = info->domain;
1948 break;
1949 }
1950 }
1951 spin_unlock_irqrestore(&device_domain_lock, flags);
 1952 /* the pcie-pci bridge already has a domain, use it */
1953 if (found) {
1954 domain = found;
1955 goto found_domain;
1956 }
1957 }
1958
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001959 domain = alloc_domain();
1960 if (!domain)
1961 goto error;
1962
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001963 /* Allocate new domain for the device */
1964 drhd = dmar_find_matched_drhd_unit(pdev);
1965 if (!drhd) {
1966 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1967 pci_name(pdev));
1968 return NULL;
1969 }
1970 iommu = drhd->iommu;
1971
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001972 ret = iommu_attach_domain(domain, iommu);
1973 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07001974 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001975 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001976 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001977
1978 if (domain_init(domain, gaw)) {
1979 domain_exit(domain);
1980 goto error;
1981 }
1982
1983 /* register pcie-to-pci device */
1984 if (dev_tmp) {
1985 info = alloc_devinfo_mem();
1986 if (!info) {
1987 domain_exit(domain);
1988 goto error;
1989 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001990 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001991 info->bus = bus;
1992 info->devfn = devfn;
1993 info->dev = NULL;
1994 info->domain = domain;
1995 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001996 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001997
 1998 /* the pcie-to-pci bridge already has a domain, use it */
1999 found = NULL;
2000 spin_lock_irqsave(&device_domain_lock, flags);
2001 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002002 if (tmp->segment == segment &&
2003 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 found = tmp->domain;
2005 break;
2006 }
2007 }
2008 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002009 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002010 free_devinfo_mem(info);
2011 domain_exit(domain);
2012 domain = found;
2013 } else {
2014 list_add(&info->link, &domain->devices);
2015 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002016 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002017 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002018 }
2019
2020found_domain:
2021 info = alloc_devinfo_mem();
2022 if (!info)
2023 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002024 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002025 info->bus = pdev->bus->number;
2026 info->devfn = pdev->devfn;
2027 info->dev = pdev;
2028 info->domain = domain;
2029 spin_lock_irqsave(&device_domain_lock, flags);
 2030 /* somebody else was faster and already set up this device */
2031 found = find_domain(pdev);
2032 if (found != NULL) {
2033 spin_unlock_irqrestore(&device_domain_lock, flags);
2034 if (found != domain) {
2035 domain_exit(domain);
2036 domain = found;
2037 }
2038 free_devinfo_mem(info);
2039 return domain;
2040 }
2041 list_add(&info->link, &domain->devices);
2042 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002043 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002044 spin_unlock_irqrestore(&device_domain_lock, flags);
2045 return domain;
2046error:
2047 /* recheck it here, maybe others set it */
2048 return find_domain(pdev);
2049}
2050
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002051static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002052#define IDENTMAP_ALL 1
2053#define IDENTMAP_GFX 2
2054#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002055
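/*
 * Install a 1:1 mapping for the physical range start..end: reserve the
 * matching IOVA range, clear any stale PTEs, then map each pfn onto
 * itself with read/write permission.
 */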
David Woodhouseb2132032009-06-26 18:50:28 +01002056static int iommu_domain_identity_map(struct dmar_domain *domain,
2057 unsigned long long start,
2058 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002059{
David Woodhousec5395d52009-06-28 16:35:56 +01002060 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2061 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002062
David Woodhousec5395d52009-06-28 16:35:56 +01002063 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2064 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002065 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002066 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002067 }
2068
David Woodhousec5395d52009-06-28 16:35:56 +01002069 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2070 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002071 /*
2072 * RMRR range might have overlap with physical memory range,
2073 * clear it first
2074 */
David Woodhousec5395d52009-06-28 16:35:56 +01002075 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002076
David Woodhousec5395d52009-06-28 16:35:56 +01002077 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2078 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002079 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002080}
2081
2082static int iommu_prepare_identity_map(struct pci_dev *pdev,
2083 unsigned long long start,
2084 unsigned long long end)
2085{
2086 struct dmar_domain *domain;
2087 int ret;
2088
David Woodhousec7ab48d2009-06-26 19:10:36 +01002089 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002090 if (!domain)
2091 return -ENOMEM;
2092
David Woodhouse19943b02009-08-04 16:19:20 +01002093 /* For _hardware_ passthrough, don't bother. But for software
2094 passthrough, we do it anyway -- it may indicate a memory
 2095 range which is reserved in E820 and so didn't get set
 2096 up in si_domain to start with */
2097 if (domain == si_domain && hw_pass_through) {
2098 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2099 pci_name(pdev), start, end);
2100 return 0;
2101 }
2102
2103 printk(KERN_INFO
2104 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2105 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002106
David Woodhouse5595b522009-12-02 09:21:55 +00002107 if (end < start) {
2108 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2109 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2110 dmi_get_system_info(DMI_BIOS_VENDOR),
2111 dmi_get_system_info(DMI_BIOS_VERSION),
2112 dmi_get_system_info(DMI_PRODUCT_VERSION));
2113 ret = -EIO;
2114 goto error;
2115 }
2116
David Woodhouse2ff729f2009-08-26 14:25:41 +01002117 if (end >> agaw_to_width(domain->agaw)) {
2118 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2119 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2120 agaw_to_width(domain->agaw),
2121 dmi_get_system_info(DMI_BIOS_VENDOR),
2122 dmi_get_system_info(DMI_BIOS_VERSION),
2123 dmi_get_system_info(DMI_PRODUCT_VERSION));
2124 ret = -EIO;
2125 goto error;
2126 }
David Woodhouse19943b02009-08-04 16:19:20 +01002127
David Woodhouseb2132032009-06-26 18:50:28 +01002128 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002129 if (ret)
2130 goto error;
2131
2132 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002133 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002134 if (ret)
2135 goto error;
2136
2137 return 0;
2138
2139 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002140 domain_exit(domain);
2141 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142}
2143
2144static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2145 struct pci_dev *pdev)
2146{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002147 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002148 return 0;
2149 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002150 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151}
2152
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002153#ifdef CONFIG_DMAR_FLOPPY_WA
2154static inline void iommu_prepare_isa(void)
2155{
2156 struct pci_dev *pdev;
2157 int ret;
2158
2159 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2160 if (!pdev)
2161 return;
2162
David Woodhousec7ab48d2009-06-26 19:10:36 +01002163 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002164 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002165
2166 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002167 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2168 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002169
2170}
2171#else
2172static inline void iommu_prepare_isa(void)
2173{
2174 return;
2175}
 2176#endif /* !CONFIG_DMAR_FLOPPY_WA */
2177
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002178static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002179
2180static int __init si_domain_work_fn(unsigned long start_pfn,
2181 unsigned long end_pfn, void *datax)
2182{
2183 int *ret = datax;
2184
2185 *ret = iommu_domain_identity_map(si_domain,
2186 (uint64_t)start_pfn << PAGE_SHIFT,
2187 (uint64_t)end_pfn << PAGE_SHIFT);
2188 return *ret;
2189
2190}
2191
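/*
 * Create the static identity (si) domain shared by all IOMMU units.  With
 * hardware pass-through no page tables are needed; otherwise every usable
 * memory region is mapped 1:1 via si_domain_work_fn().
 */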
Matt Kraai071e1372009-08-23 22:30:22 -07002192static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002193{
2194 struct dmar_drhd_unit *drhd;
2195 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002196 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002197
2198 si_domain = alloc_domain();
2199 if (!si_domain)
2200 return -EFAULT;
2201
David Woodhousec7ab48d2009-06-26 19:10:36 +01002202 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002203
2204 for_each_active_iommu(iommu, drhd) {
2205 ret = iommu_attach_domain(si_domain, iommu);
2206 if (ret) {
2207 domain_exit(si_domain);
2208 return -EFAULT;
2209 }
2210 }
2211
2212 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2213 domain_exit(si_domain);
2214 return -EFAULT;
2215 }
2216
2217 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2218
David Woodhouse19943b02009-08-04 16:19:20 +01002219 if (hw)
2220 return 0;
2221
David Woodhousec7ab48d2009-06-26 19:10:36 +01002222 for_each_online_node(nid) {
2223 work_with_active_regions(nid, si_domain_work_fn, &ret);
2224 if (ret)
2225 return ret;
2226 }
2227
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002228 return 0;
2229}
2230
2231static void domain_remove_one_dev_info(struct dmar_domain *domain,
2232 struct pci_dev *pdev);
2233static int identity_mapping(struct pci_dev *pdev)
2234{
2235 struct device_domain_info *info;
2236
2237 if (likely(!iommu_identity_mapping))
2238 return 0;
2239
Mike Traviscb452a42011-05-28 13:15:03 -05002240 info = pdev->dev.archdata.iommu;
2241 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2242 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002243
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002244 return 0;
2245}
2246
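/*
 * Attach a device to an existing domain: program its context entry first,
 * then link a new device_domain_info into the domain and the global
 * device list.
 */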
2247static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002248 struct pci_dev *pdev,
2249 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002250{
2251 struct device_domain_info *info;
2252 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002253 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002254
2255 info = alloc_devinfo_mem();
2256 if (!info)
2257 return -ENOMEM;
2258
David Woodhouse5fe60f42009-08-09 10:53:41 +01002259 ret = domain_context_mapping(domain, pdev, translation);
2260 if (ret) {
2261 free_devinfo_mem(info);
2262 return ret;
2263 }
2264
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002265 info->segment = pci_domain_nr(pdev->bus);
2266 info->bus = pdev->bus->number;
2267 info->devfn = pdev->devfn;
2268 info->dev = pdev;
2269 info->domain = domain;
2270
2271 spin_lock_irqsave(&device_domain_lock, flags);
2272 list_add(&info->link, &domain->devices);
2273 list_add(&info->global, &device_domain_list);
2274 pdev->dev.archdata.iommu = info;
2275 spin_unlock_irqrestore(&device_domain_lock, flags);
2276
2277 return 0;
2278}
2279
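/*
 * Policy check: should this device be placed in the 1:1 (identity)
 * domain?  Covers the Azalia/GFX quirks, the "identity map everything"
 * mode, the conventional-PCI source-id problem and, after boot, the
 * device's DMA mask.
 */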
David Woodhouse6941af22009-07-04 18:24:27 +01002280static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2281{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002282 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2283 return 1;
2284
2285 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2286 return 1;
2287
2288 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2289 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002290
David Woodhouse3dfc8132009-07-04 19:11:08 +01002291 /*
2292 * We want to start off with all devices in the 1:1 domain, and
2293 * take them out later if we find they can't access all of memory.
2294 *
2295 * However, we can't do this for PCI devices behind bridges,
2296 * because all PCI devices behind the same bridge will end up
2297 * with the same source-id on their transactions.
2298 *
2299 * Practically speaking, we can't change things around for these
2300 * devices at run-time, because we can't be sure there'll be no
2301 * DMA transactions in flight for any of their siblings.
2302 *
2303 * So PCI devices (unless they're on the root bus) as well as
2304 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2305 * the 1:1 domain, just in _case_ one of their siblings turns out
2306 * not to be able to map all of memory.
2307 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002308 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002309 if (!pci_is_root_bus(pdev->bus))
2310 return 0;
2311 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2312 return 0;
2313 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2314 return 0;
2315
2316 /*
2317 * At boot time, we don't yet know if devices will be 64-bit capable.
2318 * Assume that they will -- if they turn out not to be, then we can
2319 * take them out of the 1:1 domain later.
2320 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002321 if (!startup) {
2322 /*
2323 * If the device's dma_mask is less than the system's memory
2324 * size then this is not a candidate for identity mapping.
2325 */
2326 u64 dma_mask = pdev->dma_mask;
2327
2328 if (pdev->dev.coherent_dma_mask &&
2329 pdev->dev.coherent_dma_mask < dma_mask)
2330 dma_mask = pdev->dev.coherent_dma_mask;
2331
2332 return dma_mask >= dma_get_required_mask(&pdev->dev);
2333 }
David Woodhouse6941af22009-07-04 18:24:27 +01002334
2335 return 1;
2336}
2337
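/*
 * Boot-time setup of the static identity map: create si_domain and add
 * every eligible PCI device to it, using pass-through context entries
 * when the hardware supports them.
 */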
Matt Kraai071e1372009-08-23 22:30:22 -07002338static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002339{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002340 struct pci_dev *pdev = NULL;
2341 int ret;
2342
David Woodhouse19943b02009-08-04 16:19:20 +01002343 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002344 if (ret)
2345 return -EFAULT;
2346
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002347 for_each_pci_dev(pdev) {
Mike Travis825507d2011-05-28 13:15:06 -05002348 /* Skip Host/PCI Bridge devices */
2349 if (IS_BRIDGE_HOST_DEVICE(pdev))
2350 continue;
David Woodhouse6941af22009-07-04 18:24:27 +01002351 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002352 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2353 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002354
David Woodhouse5fe60f42009-08-09 10:53:41 +01002355 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002356 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002357 CONTEXT_TT_MULTI_LEVEL);
2358 if (ret)
2359 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002360 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002361 }
2362
2363 return 0;
2364}
2365
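/*
 * Main VT-d initialization: allocate per-IOMMU state and root entries,
 * choose the invalidation mechanism, set up identity/RMRR/ISA mappings
 * and finally enable translation on each unit.
 */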
Joseph Cihulab7792602011-05-03 00:08:37 -07002366static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002367{
2368 struct dmar_drhd_unit *drhd;
2369 struct dmar_rmrr_unit *rmrr;
2370 struct pci_dev *pdev;
2371 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002372 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002373
2374 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002375 * for each drhd
2376 * allocate root
2377 * initialize and program root entry to not present
2378 * endfor
2379 */
2380 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002381 g_num_of_iommus++;
2382 /*
 2383 * lock not needed as this is only incremented in the single-
 2384 * threaded kernel __init code path; all other accesses are
 2385 * read only
2386 */
2387 }
2388
Weidong Hand9630fe2008-12-08 11:06:32 +08002389 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2390 GFP_KERNEL);
2391 if (!g_iommus) {
2392 printk(KERN_ERR "Allocating global iommu array failed\n");
2393 ret = -ENOMEM;
2394 goto error;
2395 }
2396
mark gross80b20dd2008-04-18 13:53:58 -07002397 deferred_flush = kzalloc(g_num_of_iommus *
2398 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2399 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002400 ret = -ENOMEM;
2401 goto error;
2402 }
2403
mark gross5e0d2a62008-03-04 15:22:08 -08002404 for_each_drhd_unit(drhd) {
2405 if (drhd->ignored)
2406 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002407
2408 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002409 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002410
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002411 ret = iommu_init_domains(iommu);
2412 if (ret)
2413 goto error;
2414
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002415 /*
2416 * TBD:
2417 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002418 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002419 */
2420 ret = iommu_alloc_root_entry(iommu);
2421 if (ret) {
2422 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2423 goto error;
2424 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002425 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002426 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002427 }
2428
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002429 /*
2430 * Start from the sane iommu hardware state.
2431 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002432 for_each_drhd_unit(drhd) {
2433 if (drhd->ignored)
2434 continue;
2435
2436 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002437
2438 /*
2439 * If the queued invalidation is already initialized by us
2440 * (for example, while enabling interrupt-remapping) then
 2441 * things are already rolling from a sane state.
2442 */
2443 if (iommu->qi)
2444 continue;
2445
2446 /*
2447 * Clear any previous faults.
2448 */
2449 dmar_fault(-1, iommu);
2450 /*
2451 * Disable queued invalidation if supported and already enabled
2452 * before OS handover.
2453 */
2454 dmar_disable_qi(iommu);
2455 }
2456
2457 for_each_drhd_unit(drhd) {
2458 if (drhd->ignored)
2459 continue;
2460
2461 iommu = drhd->iommu;
2462
Youquan Songa77b67d2008-10-16 16:31:56 -07002463 if (dmar_enable_qi(iommu)) {
2464 /*
2465 * Queued Invalidate not enabled, use Register Based
2466 * Invalidate
2467 */
2468 iommu->flush.flush_context = __iommu_flush_context;
2469 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002470 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002471 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002472 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002473 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002474 } else {
2475 iommu->flush.flush_context = qi_flush_context;
2476 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002477 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002478 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002479 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002480 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002481 }
2482 }
2483
David Woodhouse19943b02009-08-04 16:19:20 +01002484 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002485 iommu_identity_mapping |= IDENTMAP_ALL;
2486
David Woodhouse19943b02009-08-04 16:19:20 +01002487#ifdef CONFIG_DMAR_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002488 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002489#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002490
2491 check_tylersburg_isoch();
2492
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002493 /*
 2494 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495 * identity mappings for rmrr, gfx, and isa and may fall back to static
2496 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002497 */
David Woodhouse19943b02009-08-04 16:19:20 +01002498 if (iommu_identity_mapping) {
2499 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2500 if (ret) {
2501 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2502 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002503 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002504 }
David Woodhouse19943b02009-08-04 16:19:20 +01002505 /*
2506 * For each rmrr
2507 * for each dev attached to rmrr
2508 * do
2509 * locate drhd for dev, alloc domain for dev
2510 * allocate free domain
2511 * allocate page table entries for rmrr
2512 * if context not allocated for bus
2513 * allocate and init context
2514 * set present in root table for this bus
2515 * init context with domain, translation etc
2516 * endfor
2517 * endfor
2518 */
2519 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2520 for_each_rmrr_units(rmrr) {
2521 for (i = 0; i < rmrr->devices_cnt; i++) {
2522 pdev = rmrr->devices[i];
2523 /*
 2524 * some BIOSes list non-existent devices in the
 2525 * DMAR table.
2526 */
2527 if (!pdev)
2528 continue;
2529 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2530 if (ret)
2531 printk(KERN_ERR
2532 "IOMMU: mapping reserved region failed\n");
2533 }
2534 }
2535
2536 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002537
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002538 /*
2539 * for each drhd
2540 * enable fault log
2541 * global invalidate context cache
2542 * global invalidate iotlb
2543 * enable translation
2544 */
2545 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002546 if (drhd->ignored) {
2547 /*
2548 * we always have to disable PMRs or DMA may fail on
2549 * this device
2550 */
2551 if (force_on)
2552 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002553 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002554 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002555 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002556
2557 iommu_flush_write_buffer(iommu);
2558
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002559 ret = dmar_set_interrupt(iommu);
2560 if (ret)
2561 goto error;
2562
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002563 iommu_set_root_entry(iommu);
2564
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002565 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002566 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002567
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002568 ret = iommu_enable_translation(iommu);
2569 if (ret)
2570 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002571
2572 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002573 }
2574
2575 return 0;
2576error:
2577 for_each_drhd_unit(drhd) {
2578 if (drhd->ignored)
2579 continue;
2580 iommu = drhd->iommu;
2581 free_iommu(iommu);
2582 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002583 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002584 return ret;
2585}
2586
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002587/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002588static struct iova *intel_alloc_iova(struct device *dev,
2589 struct dmar_domain *domain,
2590 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002591{
2592 struct pci_dev *pdev = to_pci_dev(dev);
2593 struct iova *iova = NULL;
2594
David Woodhouse875764d2009-06-28 21:20:51 +01002595 /* Restrict dma_mask to the width that the iommu can handle */
2596 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2597
2598 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002599 /*
2600 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002601 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002602 * from a higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002603 */
David Woodhouse875764d2009-06-28 21:20:51 +01002604 iova = alloc_iova(&domain->iovad, nrpages,
2605 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2606 if (iova)
2607 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002608 }
David Woodhouse875764d2009-06-28 21:20:51 +01002609 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2610 if (unlikely(!iova)) {
2611 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2612 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002613 return NULL;
2614 }
2615
2616 return iova;
2617}
2618
David Woodhouse147202a2009-07-07 19:43:20 +01002619static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002620{
2621 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002622 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002623
2624 domain = get_domain_for_dev(pdev,
2625 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2626 if (!domain) {
2627 printk(KERN_ERR
2628 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002629 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002630 }
2631
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002632 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002633 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002634 ret = domain_context_mapping(domain, pdev,
2635 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002636 if (ret) {
2637 printk(KERN_ERR
2638 "Domain context map for %s failed",
2639 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002640 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002641 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642 }
2643
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002644 return domain;
2645}
2646
David Woodhouse147202a2009-07-07 19:43:20 +01002647static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2648{
2649 struct device_domain_info *info;
2650
2651 /* No lock here, assumes no domain exit in normal case */
2652 info = dev->dev.archdata.iommu;
2653 if (likely(info))
2654 return info->domain;
2655
2656 return __get_valid_domain_for_dev(dev);
2657}
2658
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002659static int iommu_dummy(struct pci_dev *pdev)
2660{
2661 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2662}
2663
 2664/* Check if the pdev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002665static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002666{
David Woodhouse73676832009-07-04 14:08:36 +01002667 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002668 int found;
2669
David Woodhouse73676832009-07-04 14:08:36 +01002670 if (unlikely(dev->bus != &pci_bus_type))
2671 return 1;
2672
2673 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002674 if (iommu_dummy(pdev))
2675 return 1;
2676
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002678 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002679
2680 found = identity_mapping(pdev);
2681 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002682 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002683 return 1;
2684 else {
2685 /*
 2686 * A 32 bit DMA device is removed from si_domain and falls
 2687 * back to non-identity mapping.
2688 */
2689 domain_remove_one_dev_info(si_domain, pdev);
2690 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2691 pci_name(pdev));
2692 return 0;
2693 }
2694 } else {
2695 /*
 2696 * In case of a 64 bit DMA device detached from a VM, the device
 2697 * is put into si_domain for identity mapping.
2698 */
David Woodhouse6941af22009-07-04 18:24:27 +01002699 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002700 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002701 ret = domain_add_dev_info(si_domain, pdev,
2702 hw_pass_through ?
2703 CONTEXT_TT_PASS_THROUGH :
2704 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002705 if (!ret) {
2706 printk(KERN_INFO "64bit %s uses identity mapping\n",
2707 pci_name(pdev));
2708 return 1;
2709 }
2710 }
2711 }
2712
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002713 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002714}
2715
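/*
 * Common helper for the map_page and alloc_coherent paths: allocate an
 * IOVA range for paddr/size, install the page table entries with the
 * right read/write permissions, flush the IOTLB or write buffer as
 * needed, and return the bus address (0 on failure). Devices that
 * bypass translation simply get paddr back.
 */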
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002716static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2717 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002718{
2719 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002720 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002721 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002722 struct iova *iova;
2723 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002724 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002725 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002726 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727
2728 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002729
David Woodhouse73676832009-07-04 14:08:36 +01002730 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002731 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002732
2733 domain = get_valid_domain_for_dev(pdev);
2734 if (!domain)
2735 return 0;
2736
Weidong Han8c11e792008-12-08 15:29:22 +08002737 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002738 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002739
Mike Travisc681d0b2011-05-28 13:15:05 -05002740 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002741 if (!iova)
2742 goto error;
2743
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002744 /*
2745 * Check if DMAR supports zero-length reads on write only
2746	 * mappings.
2747 */
2748 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002749 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002750 prot |= DMA_PTE_READ;
2751 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2752 prot |= DMA_PTE_WRITE;
2753 /*
2754	 * paddr to (paddr + size) might span a partial page, so map the whole
2755	 * page. Note: if two parts of one page are mapped separately, we
2756	 * might end up with two guest_addr mappings to the same host paddr,
2757	 * but this is not a big problem.
2758 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002759 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002760 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761 if (ret)
2762 goto error;
2763
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002764 /* it's a non-present to present mapping. Only flush if caching mode */
2765 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002766 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002767 else
Weidong Han8c11e792008-12-08 15:29:22 +08002768 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002769
David Woodhouse03d6a242009-06-28 15:33:46 +01002770 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2771 start_paddr += paddr & ~PAGE_MASK;
2772 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002773
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002774error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002775 if (iova)
2776 __free_iova(&domain->iovad, iova);
2777	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002778 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002779 return 0;
2780}
2781
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002782static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2783 unsigned long offset, size_t size,
2784 enum dma_data_direction dir,
2785 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002786{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002787 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2788 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002789}
2790
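/*
 * Drain the per-IOMMU deferred unmap queues: invalidate the IOTLB for
 * the queued ranges (globally, or page-selectively in caching mode)
 * and free their IOVAs. Callers hold async_umap_flush_lock.
 */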
mark gross5e0d2a62008-03-04 15:22:08 -08002791static void flush_unmaps(void)
2792{
mark gross80b20dd2008-04-18 13:53:58 -07002793 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002794
mark gross5e0d2a62008-03-04 15:22:08 -08002795 timer_on = 0;
2796
2797 /* just flush them all */
2798 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002799 struct intel_iommu *iommu = g_iommus[i];
2800 if (!iommu)
2801 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002802
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002803 if (!deferred_flush[i].next)
2804 continue;
2805
2806		/* In caching mode, global flushes make emulation expensive */
2807 if (!cap_caching_mode(iommu->cap))
2808 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002809 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002810 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002811 unsigned long mask;
2812 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002813 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002814
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002815 /* On real hardware multiple invalidations are expensive */
2816 if (cap_caching_mode(iommu->cap))
2817 iommu_flush_iotlb_psi(iommu, domain->id,
2818 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2819 else {
2820 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2821 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2822 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2823 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002824 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002825 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002826 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002827 }
2828
mark gross5e0d2a62008-03-04 15:22:08 -08002829 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002830}
2831
2832static void flush_unmaps_timeout(unsigned long data)
2833{
mark gross80b20dd2008-04-18 13:53:58 -07002834 unsigned long flags;
2835
2836 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002837 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002838 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002839}
2840
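/*
 * Queue an IOVA for deferred freeing on its IOMMU's flush list. The
 * list is drained when the 10ms unmap timer fires, or immediately once
 * HIGH_WATER_MARK entries have accumulated.
 */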
2841static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2842{
2843 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002844 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002845 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002846
2847 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002848 if (list_size == HIGH_WATER_MARK)
2849 flush_unmaps();
2850
Weidong Han8c11e792008-12-08 15:29:22 +08002851 iommu = domain_get_iommu(dom);
2852 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002853
mark gross80b20dd2008-04-18 13:53:58 -07002854 next = deferred_flush[iommu_id].next;
2855 deferred_flush[iommu_id].domain[next] = dom;
2856 deferred_flush[iommu_id].iova[next] = iova;
2857 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002858
2859 if (!timer_on) {
2860 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2861 timer_on = 1;
2862 }
2863 list_size++;
2864 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2865}
2866
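/*
 * Tear down the mapping for a previously mapped range: clear the PTEs,
 * free the page tables, then either flush the IOTLB synchronously
 * (intel_iommu_strict) or defer the IOVA release via add_unmap().
 */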
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002867static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2868 size_t size, enum dma_data_direction dir,
2869 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870{
2871 struct pci_dev *pdev = to_pci_dev(dev);
2872 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002873 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002874 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002875 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002876
David Woodhouse73676832009-07-04 14:08:36 +01002877 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002878 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002879
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002880 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002881 BUG_ON(!domain);
2882
Weidong Han8c11e792008-12-08 15:29:22 +08002883 iommu = domain_get_iommu(domain);
2884
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002885 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002886 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2887 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002888 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002889
David Woodhoused794dc92009-06-28 00:27:49 +01002890 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2891 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002892
David Woodhoused794dc92009-06-28 00:27:49 +01002893 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2894 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002895
2896 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002897 dma_pte_clear_range(domain, start_pfn, last_pfn);
2898
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002899 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002900 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2901
mark gross5e0d2a62008-03-04 15:22:08 -08002902 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002903 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002904 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002905 /* free iova */
2906 __free_iova(&domain->iovad, iova);
2907 } else {
2908 add_unmap(domain, iova);
2909 /*
2910		 * queue up the release of the unmap to save the roughly 1/6th
2911		 * of the CPU time used up by the iotlb flush operation...
2912 */
mark gross5e0d2a62008-03-04 15:22:08 -08002913 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002914}
2915
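/*
 * Allocate zeroed pages for a coherent DMA buffer and map them through
 * __intel_map_single() with the device's coherent DMA mask. GFP_DMA
 * and GFP_DMA32 are only used for devices that bypass the IOMMU and
 * cannot address all of memory.
 */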
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002916static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2917 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002918{
2919 void *vaddr;
2920 int order;
2921
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002922 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002924
2925 if (!iommu_no_mapping(hwdev))
2926 flags &= ~(GFP_DMA | GFP_DMA32);
2927 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2928 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2929 flags |= GFP_DMA;
2930 else
2931 flags |= GFP_DMA32;
2932 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002933
2934 vaddr = (void *)__get_free_pages(flags, order);
2935 if (!vaddr)
2936 return NULL;
2937 memset(vaddr, 0, size);
2938
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002939 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2940 DMA_BIDIRECTIONAL,
2941 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002942 if (*dma_handle)
2943 return vaddr;
2944 free_pages((unsigned long)vaddr, order);
2945 return NULL;
2946}
2947
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002948static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2949 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002950{
2951 int order;
2952
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002953 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002954 order = get_order(size);
2955
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002956 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002957 free_pages((unsigned long)vaddr, order);
2958}
2959
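/*
 * Scatterlist counterpart of intel_unmap_page(): the whole list was
 * mapped into one contiguous IOVA range, so clear and free that range
 * in a single pass, flushing synchronously only in strict mode.
 */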
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002960static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2961 int nelems, enum dma_data_direction dir,
2962 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002963{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964 struct pci_dev *pdev = to_pci_dev(hwdev);
2965 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002966 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002967 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002968 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002969
David Woodhouse73676832009-07-04 14:08:36 +01002970 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971 return;
2972
2973 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002974 BUG_ON(!domain);
2975
2976 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002977
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002978 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002979 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2980 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002981 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002982
David Woodhoused794dc92009-06-28 00:27:49 +01002983 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2984 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002985
2986 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002987 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002988
David Woodhoused794dc92009-06-28 00:27:49 +01002989 /* free page tables */
2990 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2991
David Woodhouseacea0012009-07-14 01:55:11 +01002992 if (intel_iommu_strict) {
2993 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002994 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01002995 /* free iova */
2996 __free_iova(&domain->iovad, iova);
2997 } else {
2998 add_unmap(domain, iova);
2999 /*
3000		 * queue up the release of the unmap to save the roughly 1/6th
3001		 * of the CPU time used up by the iotlb flush operation...
3002 */
3003 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003004}
3005
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003006static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003007 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003008{
3009 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003010 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003011
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003012 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003013 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003014 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003015 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003016 }
3017 return nelems;
3018}
3019
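/*
 * Map a scatterlist into a single contiguous IOVA range: sum the
 * aligned page counts of all entries, allocate one IOVA block and let
 * domain_sg_mapping() fill in the PTEs. On failure the partial mapping
 * is torn down and 0 is returned.
 */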
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003020static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3021 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003022{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003024 struct pci_dev *pdev = to_pci_dev(hwdev);
3025 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003026 size_t size = 0;
3027 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003028 struct iova *iova = NULL;
3029 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003030 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003031 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003032 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003033
3034 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003035 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003036 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003037
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003038 domain = get_valid_domain_for_dev(pdev);
3039 if (!domain)
3040 return 0;
3041
Weidong Han8c11e792008-12-08 15:29:22 +08003042 iommu = domain_get_iommu(domain);
3043
David Woodhouseb536d242009-06-28 14:49:31 +01003044 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003045 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003046
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003047 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3048 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003050 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051 return 0;
3052 }
3053
3054 /*
3055 * Check if DMAR supports zero-length reads on write only
3056	 * mappings.
3057 */
3058 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003059 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003060 prot |= DMA_PTE_READ;
3061 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3062 prot |= DMA_PTE_WRITE;
3063
David Woodhouseb536d242009-06-28 14:49:31 +01003064 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003065
Fenghua Yuf5329592009-08-04 15:09:37 -07003066 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003067 if (unlikely(ret)) {
3068 /* clear the page */
3069 dma_pte_clear_range(domain, start_vpfn,
3070 start_vpfn + size - 1);
3071 /* free page tables */
3072 dma_pte_free_pagetable(domain, start_vpfn,
3073 start_vpfn + size - 1);
3074 /* free iova */
3075 __free_iova(&domain->iovad, iova);
3076 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003078
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003079 /* it's a non-present to present mapping. Only flush if caching mode */
3080 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003081 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003082 else
Weidong Han8c11e792008-12-08 15:29:22 +08003083 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003084
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003085 return nelems;
3086}
3087
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003088static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3089{
3090 return !dma_addr;
3091}
3092
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003093struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003094 .alloc_coherent = intel_alloc_coherent,
3095 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096 .map_sg = intel_map_sg,
3097 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003098 .map_page = intel_map_page,
3099 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003100 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003101};
3102
3103static inline int iommu_domain_cache_init(void)
3104{
3105 int ret = 0;
3106
3107 iommu_domain_cache = kmem_cache_create("iommu_domain",
3108 sizeof(struct dmar_domain),
3109 0,
3110					SLAB_HWCACHE_ALIGN,
3112					NULL);
3113 if (!iommu_domain_cache) {
3114 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3115 ret = -ENOMEM;
3116 }
3117
3118 return ret;
3119}
3120
3121static inline int iommu_devinfo_cache_init(void)
3122{
3123 int ret = 0;
3124
3125 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3126 sizeof(struct device_domain_info),
3127 0,
3128 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003129 NULL);
3130 if (!iommu_devinfo_cache) {
3131 printk(KERN_ERR "Couldn't create devinfo cache\n");
3132 ret = -ENOMEM;
3133 }
3134
3135 return ret;
3136}
3137
3138static inline int iommu_iova_cache_init(void)
3139{
3140 int ret = 0;
3141
3142 iommu_iova_cache = kmem_cache_create("iommu_iova",
3143 sizeof(struct iova),
3144 0,
3145 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003146 NULL);
3147 if (!iommu_iova_cache) {
3148 printk(KERN_ERR "Couldn't create iova cache\n");
3149 ret = -ENOMEM;
3150 }
3151
3152 return ret;
3153}
3154
3155static int __init iommu_init_mempool(void)
3156{
3157 int ret;
3158 ret = iommu_iova_cache_init();
3159 if (ret)
3160 return ret;
3161
3162 ret = iommu_domain_cache_init();
3163 if (ret)
3164 goto domain_error;
3165
3166 ret = iommu_devinfo_cache_init();
3167 if (!ret)
3168 return ret;
3169
3170 kmem_cache_destroy(iommu_domain_cache);
3171domain_error:
3172 kmem_cache_destroy(iommu_iova_cache);
3173
3174 return -ENOMEM;
3175}
3176
3177static void __init iommu_exit_mempool(void)
3178{
3179 kmem_cache_destroy(iommu_devinfo_cache);
3180 kmem_cache_destroy(iommu_domain_cache);
3181 kmem_cache_destroy(iommu_iova_cache);
3182
3183}
3184
Dan Williams556ab452010-07-23 15:47:56 -07003185static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3186{
3187 struct dmar_drhd_unit *drhd;
3188 u32 vtbar;
3189 int rc;
3190
3191 /* We know that this device on this chipset has its own IOMMU.
3192 * If we find it under a different IOMMU, then the BIOS is lying
3193 * to us. Hope that the IOMMU for this device is actually
3194 * disabled, and it needs no translation...
3195 */
3196 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3197 if (rc) {
3198 /* "can't" happen */
3199 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3200 return;
3201 }
3202 vtbar &= 0xffff0000;
3203
3204	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
3205 drhd = dmar_find_matched_drhd_unit(pdev);
3206 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3207 TAINT_FIRMWARE_WORKAROUND,
3208 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3209 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3210}
3211DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3212
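/*
 * Mark DRHD units that cover no PCI devices as ignored. If graphics
 * translation is off (dmar_map_gfx == 0), also ignore units that cover
 * only display devices and tag those devices with
 * DUMMY_DEVICE_DOMAIN_INFO so the DMA API bypasses them.
 */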
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003213static void __init init_no_remapping_devices(void)
3214{
3215 struct dmar_drhd_unit *drhd;
3216
3217 for_each_drhd_unit(drhd) {
3218 if (!drhd->include_all) {
3219 int i;
3220 for (i = 0; i < drhd->devices_cnt; i++)
3221 if (drhd->devices[i] != NULL)
3222 break;
3223 /* ignore DMAR unit if no pci devices exist */
3224 if (i == drhd->devices_cnt)
3225 drhd->ignored = 1;
3226 }
3227 }
3228
3229 if (dmar_map_gfx)
3230 return;
3231
3232 for_each_drhd_unit(drhd) {
3233 int i;
3234 if (drhd->ignored || drhd->include_all)
3235 continue;
3236
3237 for (i = 0; i < drhd->devices_cnt; i++)
3238 if (drhd->devices[i] &&
3239 !IS_GFX_DEVICE(drhd->devices[i]))
3240 break;
3241
3242 if (i < drhd->devices_cnt)
3243 continue;
3244
3245 /* bypass IOMMU if it is just for gfx devices */
3246 drhd->ignored = 1;
3247 for (i = 0; i < drhd->devices_cnt; i++) {
3248 if (!drhd->devices[i])
3249 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07003250 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003251 }
3252 }
3253}
3254
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003255#ifdef CONFIG_SUSPEND
3256static int init_iommu_hw(void)
3257{
3258 struct dmar_drhd_unit *drhd;
3259 struct intel_iommu *iommu = NULL;
3260
3261 for_each_active_iommu(iommu, drhd)
3262 if (iommu->qi)
3263 dmar_reenable_qi(iommu);
3264
Joseph Cihulab7792602011-05-03 00:08:37 -07003265 for_each_iommu(iommu, drhd) {
3266 if (drhd->ignored) {
3267 /*
3268 * we always have to disable PMRs or DMA may fail on
3269 * this device
3270 */
3271 if (force_on)
3272 iommu_disable_protect_mem_regions(iommu);
3273 continue;
3274 }
3275
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003276 iommu_flush_write_buffer(iommu);
3277
3278 iommu_set_root_entry(iommu);
3279
3280 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003281 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003282 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003283 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003284 if (iommu_enable_translation(iommu))
3285 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003286 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003287 }
3288
3289 return 0;
3290}
3291
3292static void iommu_flush_all(void)
3293{
3294 struct dmar_drhd_unit *drhd;
3295 struct intel_iommu *iommu;
3296
3297 for_each_active_iommu(iommu, drhd) {
3298 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003299 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003300 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003301 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003302 }
3303}
3304
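/*
 * Save the fault event control/data/address registers of every active
 * IOMMU and disable translation before the system suspends; the state
 * is restored by iommu_resume().
 */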
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003305static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003306{
3307 struct dmar_drhd_unit *drhd;
3308 struct intel_iommu *iommu = NULL;
3309 unsigned long flag;
3310
3311 for_each_active_iommu(iommu, drhd) {
3312 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3313 GFP_ATOMIC);
3314 if (!iommu->iommu_state)
3315 goto nomem;
3316 }
3317
3318 iommu_flush_all();
3319
3320 for_each_active_iommu(iommu, drhd) {
3321 iommu_disable_translation(iommu);
3322
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003323 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003324
3325 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3326 readl(iommu->reg + DMAR_FECTL_REG);
3327 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3328 readl(iommu->reg + DMAR_FEDATA_REG);
3329 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3330 readl(iommu->reg + DMAR_FEADDR_REG);
3331 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3332 readl(iommu->reg + DMAR_FEUADDR_REG);
3333
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003334 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003335 }
3336 return 0;
3337
3338nomem:
3339 for_each_active_iommu(iommu, drhd)
3340 kfree(iommu->iommu_state);
3341
3342 return -ENOMEM;
3343}
3344
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003345static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003346{
3347 struct dmar_drhd_unit *drhd;
3348 struct intel_iommu *iommu = NULL;
3349 unsigned long flag;
3350
3351 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003352 if (force_on)
3353 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3354 else
3355 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003356 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003357 }
3358
3359 for_each_active_iommu(iommu, drhd) {
3360
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003361 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003362
3363 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3364 iommu->reg + DMAR_FECTL_REG);
3365 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3366 iommu->reg + DMAR_FEDATA_REG);
3367 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3368 iommu->reg + DMAR_FEADDR_REG);
3369 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3370 iommu->reg + DMAR_FEUADDR_REG);
3371
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003372 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003373 }
3374
3375 for_each_active_iommu(iommu, drhd)
3376 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003377}
3378
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003379static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003380 .resume = iommu_resume,
3381 .suspend = iommu_suspend,
3382};
3383
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003384static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003385{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003386 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003387}
3388
3389#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003390static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003391#endif /* CONFIG_PM */
3392
Fenghua Yu99dcade2009-11-11 07:23:06 -08003393/*
3394 * Here we only respond to a device being unbound from its driver.
3395 *
3396 * A newly added device is not attached to its DMAR domain here yet; that
3397 * happens when the device is first mapped to an iova.
3398 */
3399static int device_notifier(struct notifier_block *nb,
3400 unsigned long action, void *data)
3401{
3402 struct device *dev = data;
3403 struct pci_dev *pdev = to_pci_dev(dev);
3404 struct dmar_domain *domain;
3405
David Woodhouse44cd6132009-12-02 10:18:30 +00003406 if (iommu_no_mapping(dev))
3407 return 0;
3408
Fenghua Yu99dcade2009-11-11 07:23:06 -08003409 domain = find_domain(pdev);
3410 if (!domain)
3411 return 0;
3412
Alex Williamsona97590e2011-03-04 14:52:16 -07003413 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003414 domain_remove_one_dev_info(domain, pdev);
3415
Alex Williamsona97590e2011-03-04 14:52:16 -07003416 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3417 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3418 list_empty(&domain->devices))
3419 domain_exit(domain);
3420 }
3421
Fenghua Yu99dcade2009-11-11 07:23:06 -08003422 return 0;
3423}
3424
3425static struct notifier_block device_nb = {
3426 .notifier_call = device_notifier,
3427};
3428
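/*
 * Main init entry point: parse the DMAR table, set up the remapping
 * hardware via init_dmars(), switch dma_ops to intel_dma_ops and
 * register the IOMMU API ops plus a PCI bus notifier. Failures panic
 * under tboot (force_on), otherwise they leave remapping disabled.
 */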
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003429int __init intel_iommu_init(void)
3430{
3431 int ret = 0;
3432
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003433 /* VT-d is required for a TXT/tboot launch, so enforce that */
3434 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003435
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003436 if (dmar_table_init()) {
3437 if (force_on)
3438 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003439 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003440 }
3441
3442 if (dmar_dev_scope_init()) {
3443 if (force_on)
3444 panic("tboot: Failed to initialize DMAR device scope\n");
3445 return -ENODEV;
3446 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003447
Suresh Siddha2ae21012008-07-10 11:16:43 -07003448 /*
3449 * Check the need for DMA-remapping initialization now.
3450 * Above initialization will also be used by Interrupt-remapping.
3451 */
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003452 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003453 return -ENODEV;
3454
Joseph Cihula51a63e62011-03-21 11:04:24 -07003455 if (iommu_init_mempool()) {
3456 if (force_on)
3457 panic("tboot: Failed to initialize iommu memory\n");
3458 return -ENODEV;
3459 }
3460
3461 if (dmar_init_reserved_ranges()) {
3462 if (force_on)
3463 panic("tboot: Failed to reserve iommu ranges\n");
3464 return -ENODEV;
3465 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003466
3467 init_no_remapping_devices();
3468
Joseph Cihulab7792602011-05-03 00:08:37 -07003469 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003470 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003471 if (force_on)
3472 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003473 printk(KERN_ERR "IOMMU: dmar init failed\n");
3474 put_iova_domain(&reserved_iova_list);
3475 iommu_exit_mempool();
3476 return ret;
3477 }
3478 printk(KERN_INFO
3479 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3480
mark gross5e0d2a62008-03-04 15:22:08 -08003481 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003482#ifdef CONFIG_SWIOTLB
3483 swiotlb = 0;
3484#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003485 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003486
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003487 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003488
3489 register_iommu(&intel_iommu_ops);
3490
Fenghua Yu99dcade2009-11-11 07:23:06 -08003491 bus_register_notifier(&pci_bus_type, &device_nb);
3492
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003493 return 0;
3494}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003495
Han, Weidong3199aa62009-02-26 17:31:12 +08003496static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3497 struct pci_dev *pdev)
3498{
3499 struct pci_dev *tmp, *parent;
3500
3501 if (!iommu || !pdev)
3502 return;
3503
3504 /* dependent device detach */
3505 tmp = pci_find_upstream_pcie_bridge(pdev);
3506 /* Secondary interface's bus number and devfn 0 */
3507 if (tmp) {
3508 parent = pdev->bus->self;
3509 while (parent != tmp) {
3510 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003511 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003512 parent = parent->bus->self;
3513 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003514 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003515 iommu_detach_dev(iommu,
3516 tmp->subordinate->number, 0);
3517 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003518 iommu_detach_dev(iommu, tmp->bus->number,
3519 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003520 }
3521}
3522
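/*
 * Detach one device from @domain: unlink its device_domain_info, clear
 * its context entry (and those of any bridges it sits behind), and, if
 * no other device on the same IOMMU remains in the domain, drop that
 * IOMMU from the domain's bitmap and recompute its capabilities.
 */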
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003523static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003524 struct pci_dev *pdev)
3525{
3526 struct device_domain_info *info;
3527 struct intel_iommu *iommu;
3528 unsigned long flags;
3529 int found = 0;
3530 struct list_head *entry, *tmp;
3531
David Woodhouse276dbf992009-04-04 01:45:37 +01003532 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3533 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003534 if (!iommu)
3535 return;
3536
3537 spin_lock_irqsave(&device_domain_lock, flags);
3538 list_for_each_safe(entry, tmp, &domain->devices) {
3539 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003540 if (info->segment == pci_domain_nr(pdev->bus) &&
3541 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003542 info->devfn == pdev->devfn) {
3543 list_del(&info->link);
3544 list_del(&info->global);
3545 if (info->dev)
3546 info->dev->dev.archdata.iommu = NULL;
3547 spin_unlock_irqrestore(&device_domain_lock, flags);
3548
Yu Zhao93a23a72009-05-18 13:51:37 +08003549 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003550 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003551 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003552 free_devinfo_mem(info);
3553
3554 spin_lock_irqsave(&device_domain_lock, flags);
3555
3556 if (found)
3557 break;
3558 else
3559 continue;
3560 }
3561
3562		/* if there are no other devices under the same iommu
3563		 * owned by this domain, clear this iommu in iommu_bmp,
3564		 * update iommu count and coherency
3565 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003566 if (iommu == device_to_iommu(info->segment, info->bus,
3567 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003568 found = 1;
3569 }
3570
3571 if (found == 0) {
3572 unsigned long tmp_flags;
3573 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3574 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3575 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003576 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003577 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003578
Alex Williamson9b4554b2011-05-24 12:19:04 -04003579 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3580 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3581 spin_lock_irqsave(&iommu->lock, tmp_flags);
3582 clear_bit(domain->id, iommu->domain_ids);
3583 iommu->domains[domain->id] = NULL;
3584 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3585 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003586 }
3587
3588 spin_unlock_irqrestore(&device_domain_lock, flags);
3589}
3590
3591static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3592{
3593 struct device_domain_info *info;
3594 struct intel_iommu *iommu;
3595 unsigned long flags1, flags2;
3596
3597 spin_lock_irqsave(&device_domain_lock, flags1);
3598 while (!list_empty(&domain->devices)) {
3599 info = list_entry(domain->devices.next,
3600 struct device_domain_info, link);
3601 list_del(&info->link);
3602 list_del(&info->global);
3603 if (info->dev)
3604 info->dev->dev.archdata.iommu = NULL;
3605
3606 spin_unlock_irqrestore(&device_domain_lock, flags1);
3607
Yu Zhao93a23a72009-05-18 13:51:37 +08003608 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003609 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003610 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003611 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003612
3613 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003614 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003615 */
3616 spin_lock_irqsave(&domain->iommu_lock, flags2);
3617 if (test_and_clear_bit(iommu->seq_id,
3618 &domain->iommu_bmp)) {
3619 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003620 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003621 }
3622 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3623
3624 free_devinfo_mem(info);
3625 spin_lock_irqsave(&device_domain_lock, flags1);
3626 }
3627 spin_unlock_irqrestore(&device_domain_lock, flags1);
3628}
3629
Weidong Han5e98c4b2008-12-08 23:03:27 +08003630/* domain id for virtual machine, it won't be set in context */
3631static unsigned long vm_domid;
3632
3633static struct dmar_domain *iommu_alloc_vm_domain(void)
3634{
3635 struct dmar_domain *domain;
3636
3637 domain = alloc_domain_mem();
3638 if (!domain)
3639 return NULL;
3640
3641 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003642 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003643 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3644 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3645
3646 return domain;
3647}
3648
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003649static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003650{
3651 int adjust_width;
3652
3653 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003654 spin_lock_init(&domain->iommu_lock);
3655
3656 domain_reserve_special_ranges(domain);
3657
3658 /* calculate AGAW */
3659 domain->gaw = guest_width;
3660 adjust_width = guestwidth_to_adjustwidth(guest_width);
3661 domain->agaw = width_to_agaw(adjust_width);
3662
3663 INIT_LIST_HEAD(&domain->devices);
3664
3665 domain->iommu_count = 0;
3666 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003667 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003668 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003669 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003670 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003671
3672 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003673 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003674 if (!domain->pgd)
3675 return -ENOMEM;
3676 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3677 return 0;
3678}
3679
3680static void iommu_free_vm_domain(struct dmar_domain *domain)
3681{
3682 unsigned long flags;
3683 struct dmar_drhd_unit *drhd;
3684 struct intel_iommu *iommu;
3685 unsigned long i;
3686 unsigned long ndomains;
3687
3688 for_each_drhd_unit(drhd) {
3689 if (drhd->ignored)
3690 continue;
3691 iommu = drhd->iommu;
3692
3693 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003694 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003695 if (iommu->domains[i] == domain) {
3696 spin_lock_irqsave(&iommu->lock, flags);
3697 clear_bit(i, iommu->domain_ids);
3698 iommu->domains[i] = NULL;
3699 spin_unlock_irqrestore(&iommu->lock, flags);
3700 break;
3701 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003702 }
3703 }
3704}
3705
3706static void vm_domain_exit(struct dmar_domain *domain)
3707{
3708	/* Domain 0 is reserved, so don't process it */
3709 if (!domain)
3710 return;
3711
3712 vm_domain_remove_all_dev_info(domain);
3713 /* destroy iovas */
3714 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003715
3716 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003717 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003718
3719 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003720 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003721
3722 iommu_free_vm_domain(domain);
3723 free_domain_mem(domain);
3724}
3725
Joerg Roedel5d450802008-12-03 14:52:32 +01003726static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003727{
Joerg Roedel5d450802008-12-03 14:52:32 +01003728 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003729
Joerg Roedel5d450802008-12-03 14:52:32 +01003730 dmar_domain = iommu_alloc_vm_domain();
3731 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003732 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003733 "intel_iommu_domain_init: dmar_domain == NULL\n");
3734 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003735 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003736 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003737 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003738 "intel_iommu_domain_init() failed\n");
3739 vm_domain_exit(dmar_domain);
3740 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003741 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003742 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003743
Joerg Roedel5d450802008-12-03 14:52:32 +01003744 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003745}
Kay, Allen M38717942008-09-09 18:37:29 +03003746
Joerg Roedel5d450802008-12-03 14:52:32 +01003747static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003748{
Joerg Roedel5d450802008-12-03 14:52:32 +01003749 struct dmar_domain *dmar_domain = domain->priv;
3750
3751 domain->priv = NULL;
3752 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003753}
Kay, Allen M38717942008-09-09 18:37:29 +03003754
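/*
 * iommu_ops attach hook: detach the device from whatever domain it is
 * currently in, check that this IOMMU's address width covers the
 * domain's highest mapped address, trim extra page table levels if the
 * hardware supports fewer, then add the device to the domain.
 */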
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003755static int intel_iommu_attach_device(struct iommu_domain *domain,
3756 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003757{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003758 struct dmar_domain *dmar_domain = domain->priv;
3759 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003760 struct intel_iommu *iommu;
3761 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003762
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003763 /* normally pdev is not mapped */
3764 if (unlikely(domain_context_mapped(pdev))) {
3765 struct dmar_domain *old_domain;
3766
3767 old_domain = find_domain(pdev);
3768 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003769 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3770 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3771 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003772 else
3773 domain_remove_dev_info(old_domain);
3774 }
3775 }
3776
David Woodhouse276dbf992009-04-04 01:45:37 +01003777 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3778 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003779 if (!iommu)
3780 return -ENODEV;
3781
3782 /* check if this iommu agaw is sufficient for max mapped address */
3783 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003784 if (addr_width > cap_mgaw(iommu->cap))
3785 addr_width = cap_mgaw(iommu->cap);
3786
3787 if (dmar_domain->max_addr > (1LL << addr_width)) {
3788 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003789 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003790 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003791 return -EFAULT;
3792 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003793 dmar_domain->gaw = addr_width;
3794
3795 /*
3796 * Knock out extra levels of page tables if necessary
3797 */
3798 while (iommu->agaw < dmar_domain->agaw) {
3799 struct dma_pte *pte;
3800
3801 pte = dmar_domain->pgd;
3802 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08003803 dmar_domain->pgd = (struct dma_pte *)
3804 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01003805 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01003806 }
3807 dmar_domain->agaw--;
3808 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003809
David Woodhouse5fe60f42009-08-09 10:53:41 +01003810 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003811}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003812
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003813static void intel_iommu_detach_device(struct iommu_domain *domain,
3814 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003815{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003816 struct dmar_domain *dmar_domain = domain->priv;
3817 struct pci_dev *pdev = to_pci_dev(dev);
3818
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003819 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003820}
Kay, Allen M38717942008-09-09 18:37:29 +03003821
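/*
 * iommu_ops map hook: translate IOMMU_READ/WRITE/CACHE flags into DMA
 * PTE bits, grow the domain's max_addr (checking that it still fits
 * the domain's address width) and install the mapping for
 * 2^gfp_order pages at @iova.
 */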
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003822static int intel_iommu_map(struct iommu_domain *domain,
3823 unsigned long iova, phys_addr_t hpa,
3824 int gfp_order, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003825{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003826 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003827 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003828 int prot = 0;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003829 size_t size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003830 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003831
Joerg Roedeldde57a22008-12-03 15:04:09 +01003832 if (iommu_prot & IOMMU_READ)
3833 prot |= DMA_PTE_READ;
3834 if (iommu_prot & IOMMU_WRITE)
3835 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08003836 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3837 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003838
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003839 size = PAGE_SIZE << gfp_order;
David Woodhouse163cc522009-06-28 00:51:17 +01003840 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003841 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003842 u64 end;
3843
3844 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01003845 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003846 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01003847 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003848 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01003849 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003850 return -EFAULT;
3851 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003852 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003853 }
David Woodhousead051222009-06-28 14:22:28 +01003854 /* Round up size to next multiple of PAGE_SIZE, if it and
3855 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003856 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003857 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3858 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003859 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003860}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003861
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003862static int intel_iommu_unmap(struct iommu_domain *domain,
3863 unsigned long iova, int gfp_order)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003864{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003865 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003866 size_t size = PAGE_SIZE << gfp_order;
Sheng Yang4b99d352009-07-08 11:52:52 +01003867
David Woodhouse163cc522009-06-28 00:51:17 +01003868 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3869 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003870
David Woodhouse163cc522009-06-28 00:51:17 +01003871 if (dmar_domain->max_addr == iova + size)
3872 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003873
3874 return gfp_order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003875}
Kay, Allen M38717942008-09-09 18:37:29 +03003876
Joerg Roedeld14d6572008-12-03 15:06:57 +01003877static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3878 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003879{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003880 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003881 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003882 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003883
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003884 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
Kay, Allen M38717942008-09-09 18:37:29 +03003885 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003886 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003887
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003888 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003889}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003890
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003891static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3892 unsigned long cap)
3893{
3894 struct dmar_domain *dmar_domain = domain->priv;
3895
3896 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3897 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04003898 if (cap == IOMMU_CAP_INTR_REMAP)
3899 return intr_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003900
3901 return 0;
3902}
3903
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003904static struct iommu_ops intel_iommu_ops = {
3905 .domain_init = intel_iommu_domain_init,
3906 .domain_destroy = intel_iommu_domain_destroy,
3907 .attach_dev = intel_iommu_attach_device,
3908 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003909 .map = intel_iommu_map,
3910 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003911 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003912 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003913};
David Woodhouse9af88142009-02-13 23:18:03 +00003914
3915static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3916{
3917 /*
3918 * Mobile 4 Series Chipset neglects to set RWBF capability,
3919 * but needs it:
3920 */
3921 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3922 rwbf_quirk = 1;
David Woodhouse2d9e6672010-06-15 10:57:57 +01003923
3924 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
3925 if (dev->revision == 0x07) {
3926 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
3927 dmar_map_gfx = 0;
3928 }
David Woodhouse9af88142009-02-13 23:18:03 +00003929}
3930
3931DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07003932
Adam Jacksoneecfd572010-08-25 21:17:34 +01003933#define GGC 0x52
3934#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3935#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3936#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3937#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3938#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3939#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3940#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3941#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3942
David Woodhouse9eecabc2010-09-21 22:28:23 +01003943static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3944{
3945 unsigned short ggc;
3946
Adam Jacksoneecfd572010-08-25 21:17:34 +01003947 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01003948 return;
3949
Adam Jacksoneecfd572010-08-25 21:17:34 +01003950 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01003951 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3952 dmar_map_gfx = 0;
3953 }
3954}
3955DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3956DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3957DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3958DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3959
David Woodhousee0fc7e02009-09-30 09:12:17 -07003960/* On Tylersburg chipsets, some BIOSes have been known to enable the
3961 ISOCH DMAR unit for the Azalia sound device, but not give it any
3962 TLB entries, which causes it to deadlock. Check for that. We do
3963 this in a function called from init_dmars(), instead of in a PCI
3964 quirk, because we don't want to print the obnoxious "BIOS broken"
3965 message if VT-d is actually disabled.
3966*/
3967static void __init check_tylersburg_isoch(void)
3968{
3969 struct pci_dev *pdev;
3970 uint32_t vtisochctrl;
3971
3972 /* If there's no Azalia in the system anyway, forget it. */
3973 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3974 if (!pdev)
3975 return;
3976 pci_dev_put(pdev);
3977
3978 /* System Management Registers. Might be hidden, in which case
3979 we can't do the sanity check. But that's OK, because the
3980 known-broken BIOSes _don't_ actually hide it, so far. */
3981 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3982 if (!pdev)
3983 return;
3984
3985 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3986 pci_dev_put(pdev);
3987 return;
3988 }
3989
3990 pci_dev_put(pdev);
3991
3992 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
3993 if (vtisochctrl & 1)
3994 return;
3995
3996 /* Drop all bits other than the number of TLB entries */
3997 vtisochctrl &= 0x1c;
3998
3999 /* If we have the recommended number of TLB entries (16), fine. */
4000 if (vtisochctrl == 0x10)
4001 return;
4002
4003 /* Zero TLB entries? You get to ride the short bus to school. */
4004 if (!vtisochctrl) {
4005 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4006 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4007 dmi_get_system_info(DMI_BIOS_VENDOR),
4008 dmi_get_system_info(DMI_BIOS_VERSION),
4009 dmi_get_system_info(DMI_PRODUCT_VERSION));
4010 iommu_identity_mapping |= IDENTMAP_AZALIA;
4011 return;
4012 }
4013
4014 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4015 vtisochctrl);
4016}