/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
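
/*
 * For example, with the default 48-bit guest address width and 4KiB
 * VT-d pages (VTD_PAGE_SHIFT == 12), __DOMAIN_MAX_PFN(48) is 2^36 - 1.
 * On 64-bit that is what DOMAIN_MAX_PFN() returns; on 32-bit the
 * min_t() clamp above caps it at ULONG_MAX.
 */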
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070071
Mark McLoughlinf27be032008-11-20 15:49:43 +000072#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070073#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070074#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
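
/*
 * E.g. DMA_32BIT_PFN is IOVA_PFN(0xffffffff) == 0xfffff with 4KiB pages:
 * the last page frame reachable through a 32-bit DMA mask.
 */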

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of 4KiB and
 * that the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
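
/*
 * ~0xFFFUL sets every bit from bit 12 upwards, i.e. it advertises 4KiB,
 * 8KiB, 16KiB, ... -- every power-of-two size from 4KiB up.
 */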

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
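
/*
 * Worked example: the default 48-bit address width corresponds to agaw 2
 * (width_to_agaw(48) == (48 - 30) / 9 == 2), which in turn means a
 * 4-level page table (agaw_to_level(2) == 4).
 */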

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
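
/*
 * A level-1 PTE maps a single 4KiB page; each higher level spans 512x
 * more, so a level-2 entry covers 512 pages (2MiB). For instance,
 * align_to_level(0x201, 2) rounds pfn 0x201 up to 0x400, the next
 * 512-page boundary.
 */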

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
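
/*
 * On x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12, so the conversions
 * above are identity operations; with (hypothetical) 16KiB MM pages each
 * MM pfn would correspond to four 4KiB VT-d pfns.
 */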

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
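
/* A 4KiB root table of 16-byte entries gives 256 entries, one per PCI bus. */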

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
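
/*
 * A context-mapping path would typically compose an entry with the
 * helpers above roughly as follows (sketch only; the real sequence
 * lives in the domain context mapping code later in this file):
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */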

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
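
/*
 * Page-table pages are page-sized and page-aligned, so a pte pointer
 * whose low 12 bits are all zero must be the first entry of its table
 * page. The range loops below rely on this to batch one cache flush
 * per table page rather than one per PTE.
 */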

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
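
/*
 * cap_sagaw() is a bitmask of the adjusted guest address widths the
 * hardware supports (bit 1 == 39-bit/3-level, bit 2 == 48-bit/4-level,
 * and so on). E.g. on hardware reporting bits 1 and 2, a request for
 * MAX_AGAW_WIDTH (64) starts probing at agaw 3 and settles on agaw 2,
 * i.e. a 48-bit 4-level table.
 */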

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
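
/*
 * Example walk: in a 4-level (agaw 2) domain with target_level 1, the
 * loop uses pfn bits 35:27, 26:18 and 17:9 to pick (and, if necessary,
 * allocate) the intermediate tables, and returns the leaf PTE selected
 * by bits 8:0. A target_level of 0 stops at whatever level the walk
 * bottoms out at, which lets callers find superpage PTEs.
 */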

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}
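
/*
 * The returned order describes the last PTE cleared: 0 for a 4KiB leaf,
 * 9 for a 2MiB superpage, and so on, so the caller can flush the IOTLB
 * at the matching granularity.
 */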

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* a global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to a domain selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page size to be 2 ^ x, with the base address
	 * naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
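
/*
 * The mask is the number of low pfn bits the hardware may ignore, so
 * flushing 8 pages yields mask 3: one naturally aligned 32KiB
 * invalidation. A request for e.g. 5 pages is rounded up to 8.
 */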

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
		 ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
1333
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001334static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001335{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001336 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337
1338 domain = alloc_domain_mem();
1339 if (!domain)
1340 return NULL;
1341
Suresh Siddha4c923d42009-10-02 11:01:24 -07001342 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08001343 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001344 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345
1346 return domain;
1347}
1348
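/*
 * Bind @domain to @iommu: find a free domain id in the IOMMU's bitmap,
 * record the id in the domain, and mark this IOMMU in the domain's
 * iommu_bmp so later teardown knows which units reference it.
 */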
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

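/*
 * Each page-table level resolves 9 address bits on top of the 12-bit
 * page offset, so usable widths are 12 + 9*n. Round the guest width up
 * to the next such value, capped at 64: e.g. gaw = 39 stays 39
 * ((39 - 12) % 9 == 0), while gaw = 40 is rounded up to 48.
 */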
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

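/*
 * Program the context entry for (@segment, @bus, @devfn) so that DMA
 * from that device is translated through @domain's page tables. For VM
 * and static-identity domains the domain id is (re)allocated per IOMMU,
 * and top page-table levels are skipped when this IOMMU supports a
 * smaller address width than the domain was built with.
 */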
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

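/*
 * Set up context entries for @pdev and for every bridge on the path up
 * to its upstream PCIe-to-PCI bridge, since all devices behind such a
 * bridge share one source-id and therefore must share the domain.
 */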
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

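/*
 * With VTD_STRIDE_SHIFT == 9, each extra level multiplies the page size
 * by 512: level 1 = 4KiB, level 2 = 2MiB, level 3 = 1GiB. A 2MiB page
 * (level 2) is usable only if iov_pfn and phy_pfn are both 512-page
 * aligned, at least 512 pages remain to be mapped, and the hardware
 * reports superpage support at that level.
 */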
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

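/*
 * Core mapping routine: fill nr_pages worth of PTEs starting at
 * @iov_pfn, either from a scatterlist (@sg != NULL) or from a single
 * contiguous range starting at @phys_pfn. sg_res counts pages left in
 * the current sg entry; the non-sg path primes it with nr_pages + 1 so
 * it never reaches zero and sg is never dereferenced.
 */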
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1)
				pteval |= DMA_PTE_LARGE_PAGE;
			else
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
		}
		/* We don't need a lock here; nobody else
		 * touches this iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device's
 * device_domain_info, from which we look up its domain.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* Find or create the domain for @pdev; the returned domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		/* don't leak the domain we just allocated */
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

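/*
 * Install a 1:1 mapping for the physical range [start, end] in @domain:
 * reserve the corresponding IOVA range, clear any PTEs left from an
 * overlapping range, then map the pages read/write.
 */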
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, and so didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

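/*
 * Build the static identity (si) domain: attach it to every IOMMU and,
 * unless hardware pass-through is in use, install 1:1 mappings for all
 * usable memory ranges of every online node.
 */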
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

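/*
 * Decide whether @pdev should live in the 1:1 identity domain. The
 * IDENTMAP_* flags force a yes for everything, graphics, or Azalia
 * audio; conventional-PCI devices behind bridges are excluded because
 * they share a source-id; and once the system is up (!startup), devices
 * whose DMA mask cannot reach all of memory are excluded as well.
 */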
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					  hw ? CONTEXT_TT_PASS_THROUGH :
					       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

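/*
 * One-time DMA remapping bring-up: count the IOMMUs and allocate the
 * global tracking arrays, initialize per-IOMMU domain state and root
 * entries, pick queued vs. register-based invalidation, set up the
 * identity and RMRR/ISA mappings, then enable fault reporting and
 * translation on every unit.
 */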
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *	allocate root
	 *	initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa, and may fall back to
	 * static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *	for each dev attached to rmrr
	 *	do
	 *		locate drhd for dev, alloc domain for dev
	 *		allocate free domain
	 *		allocate page table entries for rmrr
	 *		if context not allocated for bus
	 *			allocate and init context
	 *			set present in root table for this bus
	 *		init context with domain, translation etc
	 *	endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(drhd->iommu);
			continue;
		}
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
2621
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002622/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002623static struct iova *intel_alloc_iova(struct device *dev,
2624 struct dmar_domain *domain,
2625 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002626{
2627 struct pci_dev *pdev = to_pci_dev(dev);
2628 struct iova *iova = NULL;
2629
David Woodhouse875764d2009-06-28 21:20:51 +01002630 /* Restrict dma_mask to the width that the iommu can handle */
2631 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2632
2633 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002634 /*
2635 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002636 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002637 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002638 */
David Woodhouse875764d2009-06-28 21:20:51 +01002639 iova = alloc_iova(&domain->iovad, nrpages,
2640 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2641 if (iova)
2642 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002643 }
David Woodhouse875764d2009-06-28 21:20:51 +01002644 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2645 if (unlikely(!iova)) {
2646 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2647 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002648 return NULL;
2649 }
2650
2651 return iova;
2652}
2653
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure the context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed\n",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here; assumes no domain exit in the normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32-bit device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * If a 64-bit DMA device was detached from a VM, the device
		 * is put back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

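/*
 * In short, iommu_no_mapping() returns 1 when DMA for the device should
 * bypass translation (a non-PCI device, a dummied device, or a device
 * kept in or moved back into si_domain), and 0 when the device must use
 * a real DMA domain -- removing it from si_domain first when its DMA
 * mask no longer qualifies for identity mapping.
 */
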
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if the DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr..(paddr + size) may cover partial pages; map whole pages.
	 * Note: if two parts of one page are mapped separately, we may have
	 * two guest addresses mapping to the same host paddr, but this is
	 * not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

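/*
 * Worked example for the return value above (pfn chosen purely for
 * illustration): with 4KiB pages, mapping paddr 0x12345678 into an iova
 * allocation whose pfn_lo is 0xf0000 yields
 *
 *	start_paddr = (0xf0000 << PAGE_SHIFT) + (0x12345678 & ~PAGE_MASK)
 *		    = 0xf0000000 + 0x678 = 0xf0000678
 *
 * i.e. the buffer's offset within its page is preserved in the bus
 * address handed back to the driver.
 */
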
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

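/*
 * Lifecycle of a deferred unmap, tying the helpers above together:
 * intel_unmap_page()/intel_unmap_sg() call add_unmap(), which parks the
 * IOVA on the per-IOMMU deferred_flush[] queue and arms a 10ms timer.
 * flush_unmaps_timeout() then runs flush_unmaps(), which batches the
 * IOTLB invalidations and frees the queued IOVAs.  If the queue reaches
 * HIGH_WATER_MARK first, add_unmap() flushes synchronously instead.
 */
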
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		/*
		 * Queue up the release of the unmap; this saves roughly
		 * 1/6th of the cpu otherwise used up by the iotlb flush.
		 */
		add_unmap(domain, iova);
	}
}

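/*
 * Coherent DMA allocation: grab zeroed pages and map them bidirectionally
 * through __intel_map_single() against the device's coherent_dma_mask.
 * When the IOMMU is bypassed for this device, restrict the allocation
 * zone (GFP_DMA/GFP_DMA32) instead, since no translation will narrow the
 * address for us.
 */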
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		/*
		 * Queue up the release of the unmap; this saves roughly
		 * 1/6th of the cpu otherwise used up by the iotlb flush.
		 */
		add_unmap(domain, iova);
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if the DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

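/*
 * A minimal sketch of how a driver reaches these ops through the generic
 * DMA API (hypothetical caller; "pdev", "buf" and "len" are illustrative
 * names, not from this file):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	... device performs DMA against "handle" ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_map_single() resolves through dma_ops to intel_map_page(), and
 * dma_unmap_single() to intel_unmap_page(); a zero handle signals
 * failure via intel_mapping_error().
 */
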
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {
		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

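/*
 * dmar_find_matched_atsr_unit() below answers "may this device use ATS?":
 * it walks up from the (physical) device to its PCIe root port and
 * returns 1 if that root port is listed in an ATSR for the device's
 * segment, or if the matching ATSR is marked include_all.
 */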
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Here we only respond to the action of a device being unbound from its
 * driver.
 *
 * An added device is not attached to its DMAR domain here yet. That will
 * happen when the device is mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

int __init intel_iommu_init(void)
{
	int ret = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

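/*
 * Detach one PCI device from a domain: unlink its device_domain_info,
 * disable its device-IOTLB, and tear down the context entries for the
 * device and the bridges it sits behind.  If this was the last device
 * the domain had on that IOMMU, the IOMMU is also dropped from the
 * domain's bitmap; for non-VM, non-static-identity domains the domain
 * id is released on that IOMMU as well.
 */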
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu from iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/*
		 * Clear this iommu from iommu_bmp, and update the iommu
		 * count and capabilities.
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

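/*
 * Example of the width arithmetic above: guestwidth_to_adjustwidth()
 * rounds the guest width up to a page-table level boundary (a 12-bit
 * page offset plus 9 bits per level), so a 48-bit guest_width stays 48
 * and width_to_agaw() selects a 4-level table, while a 39-bit guest
 * gets a 3-level table.
 */
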
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

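/*
 * A note on intel_iommu_unmap() below: dma_pte_clear_range() hands back
 * an order, and the callback reports PAGE_SIZE << order to the caller,
 * i.e. the amount actually unmapped.  This is how generic code can learn
 * that more than the requested size was removed (e.g. if a larger page
 * covered the range).
 */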
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

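/*
 * intel_iommu_domain_has_cap() below is normally reached through the
 * generic iommu_domain_has_cap() helper.  A hedged sketch of a typical
 * check (the domain is assumed to come from iommu_domain_alloc()):
 *
 *	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 *
 * so that subsequent iommu_map() calls request snoop-coherent
 * (DMA_PTE_SNP) mappings.
 */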
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}

/*
 * Group numbers are arbitrary.  Devices with the same group number
 * indicate that the iommu cannot differentiate between them.  To avoid
 * tracking used groups we just use the seg|bus|devfn of the lowest
 * level at which we're able to differentiate devices.
 */
static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge;
	union {
		struct {
			u8 devfn;
			u8 bus;
			u16 segment;
		} pci;
		u32 group;
	} id;

	if (iommu_no_mapping(dev))
		return -ENODEV;

	id.pci.segment = pci_domain_nr(pdev->bus);
	id.pci.bus = pdev->bus->number;
	id.pci.devfn = pdev->devfn;

	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge)) {
			id.pci.bus = bridge->subordinate->number;
			id.pci.devfn = 0;
		} else {
			id.pci.bus = bridge->bus->number;
			id.pci.devfn = bridge->devfn;
		}
	}

	if (!pdev->is_virtfn && iommu_group_mf)
		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);

	*groupid = id.group;

	return 0;
}
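
/*
 * For illustration: on little-endian x86 the union above packs the ID
 * as (segment << 16) | (bus << 8) | devfn.  A device at 0000:01:02.3
 * with no upstream PCIe-to-PCI bridge has devfn = (2 << 3) | 3 = 0x13
 * and therefore groupid = 0x00000113; another function in the same slot
 * gets its own group unless iommu_group_mf collapses both to function 0.
 */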

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.device_group	= intel_iommu_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
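
/*
 * These callbacks are reached through the generic IOMMU API once the
 * driver has registered intel_iommu_ops for the PCI bus.  A hedged
 * sketch of a typical consumer (illustrative only: error handling is
 * trimmed and pdev/iova/phys are made-up names):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(domain, &pdev->dev);	(-> .attach_dev)
 *	iommu_map(domain, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);		(-> .unmap)
 *	iommu_detach_device(domain, &pdev->dev);
 *	iommu_domain_free(domain);
 */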

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
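
/*
 * DECLARE_PCI_FIXUP_HEADER() above makes the PCI core run
 * quirk_iommu_rwbf() when it first reads the config header of device
 * 8086:2a40, i.e. at enumeration time.  RWBF refers to the VT-d
 * "required write-buffer flushing" capability bit; setting rwbf_quirk
 * forces write-buffer flushes even though this chipset fails to
 * advertise that it needs them.
 */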

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
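
/*
 * Decode of the register checked above, as the code uses it: bit 0 of
 * the VTISOCHCTRL word selects whether Azalia DMA is routed to the
 * non-isoch DMAR unit, and the 0x1c field holds the isoch TLB entry
 * count (0x10 == 16, the recommended value).  So, for example, a raw
 * value of 0x11 is fine (non-isoch routing), 0x10 is fine (16 entries),
 * and 0x00 triggers the WARN and forces identity mapping for Azalia.
 */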