/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
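
/*
 * Worked example: in the core's pgsize bitmap a set bit N advertises
 * support for pages of size 2^N. ~0xFFFUL clears bits 0-11 and sets bits
 * 12-63, i.e. 4KiB and every larger power of two, so a naturally aligned
 * power-of-two region is never split before it reaches us.
 */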

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

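/*
 * Worked example: with the default 48-bit address width,
 * width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4,
 * i.e. a four-level page table in which each level decodes
 * LEVEL_STRIDE = 9 bits of the DMA pfn, mirroring x86-64 paging.
 */
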
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

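/*
 * For example, lvl_to_nr_pages(1) == 1 (a 4KiB leaf), lvl_to_nr_pages(2)
 * == 512 (one 2MiB superpage) and lvl_to_nr_pages(3) == 262144 (1GiB),
 * consistent with the level_size()/level_mask() helpers above.
 */
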
/* VT-d pages must never be _larger_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
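
/*
 * On x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so both conversions above
 * degenerate to the identity; the shifts only do real work on
 * architectures whose MM page size exceeds 4KiB.
 */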
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

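/*
 * Concretely: ROOT_ENTRY_NR is 4096 / 16 = 256, one 16-byte root entry
 * per possible PCI bus number. Bit 0 of val is the present bit and bits
 * 12-63 hold the physical address of that bus's context-entry table,
 * which get_context_addr_from_root() above recovers.
 */
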
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

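/*
 * Example encodings: a present 4KiB leaf for pfn 0x1234 holds
 * (0x1234ULL << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE, while a
 * superpage leaf additionally has bit 7 set, as tested by
 * dma_pte_superpage(). first_pte_in_page() relies on 512 8-byte PTEs
 * exactly filling one 4KiB table page: a pointer with no page-offset
 * bits set must be entry 0.
 */
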
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
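
/*
 * Example usage: the options parsed above may be combined with commas on
 * the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables translation, disables batched IOTLB flushing and disables
 * superpage support.
 */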

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

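/*
 * Example: for max_gaw = DEFAULT_DOMAIN_ADDRESS_WIDTH (48) the loop above
 * starts at agaw 2 (four-level); if the hardware's SAGAW field lacks that
 * bit it falls back to agaw 1 (39-bit) and then agaw 0 (30-bit),
 * returning -1 if nothing matches.
 */
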
/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

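/*
 * Example: if one active IOMMU reports 2MiB+1GiB support (cap value 0x3)
 * and another only 2MiB (0x1), the mask above intersects to 0x1 and
 * fls(0x1) = 1, i.e. the domain is limited to 2MiB superpages; an empty
 * intersection yields 0, disabling superpages entirely.
 */
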
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

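/*
 * Note on pfn_to_dma_pte() above: target_level == 0 accepts whatever the
 * walk finds first (a superpage or a non-present entry), while a non-zero
 * target_level forces descent to exactly that level, allocating
 * intermediate tables on the way. The cmpxchg64() tolerates a concurrent
 * walker installing the same intermediate page: the loser frees its page
 * and reuses the winner's.
 */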

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

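/*
 * The order returned above reflects the last PTE size cleared: 0 for a
 * 4KiB leaf, 9 when a 2MiB superpage (large_page == 2) was cleared, so
 * callers can size the subsequent IOTLB invalidation accordingly.
 */
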
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

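/*
 * Example: flushing 3 pages gives mask = ilog2(roundup to 4) = 2, i.e. a
 * page-selective invalidation covering 4 naturally aligned pages at addr;
 * if the hardware's MAMV capability cannot express that mask (or PSI is
 * unsupported), the code above falls back to a domain-selective flush.
 */
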
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
		 ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001341static int iommu_attach_domain(struct dmar_domain *domain,
1342 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001344 int num;
1345 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346 unsigned long flags;
1347
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001348 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001349
1350 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001351
1352 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1353 if (num >= ndomains) {
1354 spin_unlock_irqrestore(&iommu->lock, flags);
1355 printk(KERN_ERR "IOMMU: no free domain ids\n");
1356 return -ENOMEM;
1357 }
1358
1359 domain->id = num;
1360 set_bit(num, iommu->domain_ids);
1361 set_bit(iommu->seq_id, &domain->iommu_bmp);
1362 iommu->domains[num] = domain;
1363 spin_unlock_irqrestore(&iommu->lock, flags);
1364
1365 return 0;
1366}
1367
1368static void iommu_detach_domain(struct dmar_domain *domain,
1369 struct intel_iommu *iommu)
1370{
1371 unsigned long flags;
1372 int num, ndomains;
1373 int found = 0;
1374
1375 spin_lock_irqsave(&iommu->lock, flags);
1376 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001377 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001378 if (iommu->domains[num] == domain) {
1379 found = 1;
1380 break;
1381 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001382 }
1383
1384 if (found) {
1385 clear_bit(num, iommu->domain_ids);
1386 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1387 iommu->domains[num] = NULL;
1388 }
Weidong Han8c11e792008-12-08 15:29:22 +08001389 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001390}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
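
/*
 * Worked example for the rounding above: VT-d page tables resolve 9 bits
 * per level on top of the 12-bit page offset, so a usable width has the
 * form 12 + 9 * n. A guest width of 48 is already aligned
 * ((48 - 12) % 9 == 0) and is kept; a guest width of 40 has r == 1 and
 * is rounded up to 40 + 9 - 1 = 48.
 */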

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
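
/*
 * The agaw computed above is an index rather than a bit width: in the
 * SAGAW capability field, bit 1 stands for a 39-bit/3-level table,
 * bit 2 for 48-bit/4-level, and so on in 9-bit steps, which is why the
 * fallback search stops at bit 5.
 */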

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
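
/*
 * Example: on x86, where MM and VT-d pages are both 4KiB, a buffer at
 * page offset 0x234 with size 0x1000 spans two pages, and indeed
 * aligned_nrpages(0x234, 0x1000) == PAGE_ALIGN(0x1234) >> 12 == 2.
 */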

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
	}
	return level;
}
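
/*
 * Example: a 2MiB superpage (level 2) requires the low VTD_STRIDE_SHIFT
 * (9) bits of both iov_pfn and phy_pfn to be clear and at least 512
 * pages left to map; only then does the loop advance past level 1.
 * A 1GiB page (level 3) additionally needs 18 aligned bits and
 * 512 * 512 pages.
 */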

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1)
				pteval |= DMA_PTE_LARGE_PAGE;
			else
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
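
/*
 * domain_pfn_mapping() with iov_pfn == phys_pfn is how the identity
 * mapping paths below (RMRRs, si_domain) build their 1:1 translations;
 * the scatterlist variant serves the scatter-gather DMA mapping path.
 */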

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
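
/*
 * IDENTMAP_ALL is set when booting with iommu=pt, IDENTMAP_GFX comes
 * from CONFIG_INTEL_IOMMU_BROKEN_GFX_WA, and IDENTMAP_AZALIA is set by
 * the Tylersburg isochronous quirk; see init_dmars() and
 * check_tylersburg_isoch().
 */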

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up
	   to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
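
/*
 * In short: for software passthrough the si_domain 1:1-maps every
 * usable RAM range known to memblock, while for hardware passthrough
 * (hw != 0) no page tables are populated at all -- the context entries
 * are simply programmed in pass-through mode.
 */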

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
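
/*
 * Example of the late (startup == 0) check: on a machine with 8GiB of
 * RAM, dma_get_required_mask() covers at least 33 bits, so a device
 * advertising only a 32-bit DMA mask fails the test and is later moved
 * out of the 1:1 domain into a normal remapping domain.
 */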

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					  hw ? CONTEXT_TT_PASS_THROUGH :
					       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other access is read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
				 sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n", iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n", iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *   endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(drhd->iommu);
			continue;
		}
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
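
/*
 * The low-first policy above can be turned off with intel_iommu=forcedac,
 * which makes 64-bit capable devices allocate straight from the full
 * range instead of trying below 4GiB first.
 */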

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed\n",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}
2680
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002681static int iommu_dummy(struct pci_dev *pdev)
2682{
2683 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2684}
2685
2686/* Check if the pdev needs to go through non-identity map and unmap process.*/
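/*
 * Decision summary (mirrors the logic below): return 1 (skip translation)
 * for non-PCI devices, for devices quirked with a dummy domain, and for
 * devices that belong in the static identity domain; return 0 for devices
 * that must use a real DMA remapping domain.  Devices are also migrated
 * in and out of si_domain here when their DMA mask no longer matches the
 * identity-mapping policy.
 */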
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * The 32 bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64 bit DMA device detached from a VM is put back
		 * into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr to (paddr + size) may span a partial page, so map the whole
	 * page.  Note: if two parts of one page are mapped separately, we
	 * might have two guest addresses mapping to the same host paddr,
	 * but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

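/*
 * Deferred-unmap machinery: instead of invalidating the IOTLB on every
 * unmap, add_unmap() queues the freed iova on a per-iommu list, and
 * either a 10ms timer or hitting HIGH_WATER_MARK triggers flush_unmaps(),
 * which performs one batched invalidation per iommu and only then returns
 * the iova ranges to the allocator.  intel_iommu_strict bypasses this and
 * flushes synchronously at unmap time.
 */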
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the CPU time used up by the iotlb flush operation.
		 */
	}
}

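/*
 * For translated devices the GFP_DMA/GFP_DMA32 zone restrictions are
 * dropped below, since the iommu can remap any page into the device's
 * addressable range; only identity-mapped devices still need an
 * allocation that physically fits under their coherent DMA mask.
 */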
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the CPU time used up by the iotlb flush operation.
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
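
/*
 * These ops are installed as the global dma_ops in intel_iommu_init();
 * drivers never call them directly but go through the generic DMA API,
 * e.g. (hypothetical driver code, for illustration only):
 *
 *	dma_addr_t handle = dma_map_page(&pdev->dev, page, 0,
 *					 PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(&pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * which dispatches to intel_map_page()/intel_unmap_page() above.
 */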

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

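/*
 * System sleep support: on suspend the fault-event registers (FECTL,
 * FEDATA, FEADDR, FEUADDR) of every active iommu are saved and
 * translation is disabled; on resume the hardware is reprogrammed via
 * init_iommu_hw() and the saved registers are written back.
 */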
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

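/*
 * Returns 1 if the device (or, for a VF, its physical function) sits
 * behind a root port listed in one of the parsed ATSR units, i.e. the
 * platform declares that Address Translation Services may be used on
 * the path to it; returns 0 otherwise.
 */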
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Here we only respond to the unbinding of a device from its driver.
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

int __init intel_iommu_init(void)
{
	int ret = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

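/*
 * Tear-down path for VM domains: since a VM domain can span several
 * iommus, iommu_free_vm_domain() below walks every DRHD unit and clears
 * the domain from each iommu's domain_ids bitmap and domains[] array.
 */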
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

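	/*
	 * The hardware starts its walk at dmar_domain->pgd and descends
	 * dmar_domain->agaw levels.  If the domain was built with more
	 * levels than this iommu supports, each surplus top level holds
	 * a single entry pointing at the next level, so it can safely be
	 * dropped and its child promoted to be the new pgd.
	 */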
	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
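
/*
 * Illustrative only: mapping one 4KiB page read/write via the generic
 * API of this era lands here with iommu_prot == (IOMMU_READ |
 * IOMMU_WRITE), which the code above turns into DMA_PTE_READ |
 * DMA_PTE_WRITE ("dom", "iova" and "pg" are placeholders):
 *
 *	iommu_map(dom, iova, page_to_phys(pg), PAGE_SIZE,
 *		  IOMMU_READ | IOMMU_WRITE);
 */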

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}
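
/*
 * Sketch of the contract above (assumes the generic layer's behaviour
 * in this API generation): ->unmap() returns how many bytes it actually
 * cleared, and iommu_unmap() in drivers/iommu/iommu.c keeps calling it
 * until the requested range is gone, roughly:
 *
 *	while (unmapped < size) {
 *		size_t n = ops->unmap(domain, iova + unmapped,
 *				      size - unmapped);
 *		if (!n)
 *			break;
 *		unmapped += n;
 *	}
 */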

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
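
/*
 * Sanity-check sketch (illustrative; "dom", "iova" and "hpa" are
 * placeholders): after a successful iommu_map(dom, iova, hpa, ...),
 * one would expect
 *
 *	iommu_iova_to_phys(dom, iova) == (hpa & VTD_PAGE_MASK)
 *
 * since dma_pte_addr() returns only the page-aligned address stored
 * in the PTE, without the page offset.
 */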

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}
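
/*
 * Callers are expected to probe a capability before relying on it;
 * e.g. (sketch, "dom" is a placeholder):
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE;
 *
 *	if (iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 */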

/*
 * Group numbers are arbitrary.  Devices with the same group number
 * indicate that the iommu cannot differentiate between them.  To avoid
 * tracking used groups we just use the seg|bus|devfn of the lowest
 * level at which we're able to differentiate devices.
 */
static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge;
	union {
		struct {
			u8 devfn;
			u8 bus;
			u16 segment;
		} pci;
		u32 group;
	} id;

	if (iommu_no_mapping(dev))
		return -ENODEV;

	id.pci.segment = pci_domain_nr(pdev->bus);
	id.pci.bus = pdev->bus->number;
	id.pci.devfn = pdev->devfn;

	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge)) {
			id.pci.bus = bridge->subordinate->number;
			id.pci.devfn = 0;
		} else {
			id.pci.bus = bridge->bus->number;
			id.pci.devfn = bridge->devfn;
		}
	}

	if (!pdev->is_virtfn && iommu_group_mf)
		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);

	*groupid = id.group;

	return 0;
}
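
/*
 * Layout sketch of the id union above, assuming a little-endian
 * machine (devfn is the low byte): the u32 read through id.group is
 * segment << 16 | bus << 8 | devfn, so device 0000:01:02.3 yields
 *
 *	id.pci.devfn = PCI_DEVFN(2, 3) = 0x13
 *	id.group     = 0x00000113
 */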

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.device_group	= intel_iommu_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
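
/*
 * Note on pgsize_bitmap (a sketch of the generic layer's behaviour in
 * this API generation): it advertises which mapping granules ->map()
 * accepts, and iommu_map() splits each request accordingly.  A driver
 * advertising only the 4KiB bit would see an 8KiB request arrive as
 * two 4KiB ->map() calls; INTEL_IOMMU_PGSIZES (defined earlier in this
 * file) effectively accepts any 4KiB-aligned size in a single call.
 */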

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
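
/*
 * 8086:2a40 is believed to be the Mobile 4 Series (GM45/Cantiga)
 * memory controller hub.  The header fixup above runs while that
 * device's config header is being probed, before any driver binds,
 * so rwbf_quirk should already be set by the time the DMAR code
 * consults it.
 */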

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
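
/*
 * Decode sketch for the GGC test above: GGC is a 16-bit register at
 * config offset 0x52 whose bits 11:8 describe the stolen GTT memory,
 * with bit 11 doubling as the VT-enable flag.  For example, a raw
 * value of 0x0b00 masks to GGC_MEMORY_SIZE_4M_VT, which has
 * GGC_MEMORY_VT_ENABLED set, so graphics translation stays on.
 */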

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
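
/*
 * Decode sketch for the checks above (register layout as the code
 * implies, not from a public datasheet): bit 0 of the dword at offset
 * 0x188 routes Azalia DMA to the non-isoch unit, and bits 4:2 hold
 * the isoch unit's TLB entry count.  E.g. a raw value of 0x12 masks
 * (& 0x1c) to 0x10, i.e. the recommended 16 entries, and passes.
 */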