/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

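/*
 * Worked example: with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) = (1 << (48 - 12)) - 1 = 2^36 - 1. On 64-bit
 * builds DOMAIN_MAX_PFN(48) keeps that value; on 32-bit builds the
 * min_t() above clamps it to ULONG_MAX so that PFN arithmetic stays
 * within 'unsigned long'.
 */
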
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

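/*
 * Example: ~0xFFFUL leaves bits 12, 13, 14, ... set, i.e. it advertises
 * 4KiB, 8KiB, 16KiB, ... -- every size that is a power-of-two multiple
 * of 4KiB -- which matches the legacy behaviour described above.
 */
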
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

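/*
 * Worked example: a domain with agaw 2 uses agaw_to_level(2) = 4 levels
 * of page table and spans agaw_to_width(2) = 30 + 2 * 9 = 48 bits. Each
 * level resolves LEVEL_STRIDE = 9 bits, so a level-1 PTE maps a 4KiB
 * page, a level-2 PTE maps lvl_to_nr_pages(2) = 512 pages (2MiB), and a
 * level-3 PTE maps 1GiB.
 */
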
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

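/*
 * Example: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and the two PFN
 * spaces coincide. On a (hypothetical here) architecture with 64KiB MM
 * pages, PAGE_SHIFT would be 16 and each MM PFN would correspond to 16
 * consecutive DMA PFNs, i.e. mm_to_dma_pfn(pfn) == pfn << 4.
 */
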
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

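/*
 * Illustrative sketch (not the full mapping path): a context entry for a
 * device is typically composed with the helpers above roughly as
 *
 *	context_set_domain_id(context, domain->id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * where CONTEXT_TT_MULTI_LEVEL comes from <linux/intel-iommu.h>, followed
 * by a cache flush of the entry on non-coherent hardware.
 */
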
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

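/*
 * Example: a readable, writable last-level PTE for DMA pfn N is built as
 *
 *	dma_clear_pte(pte);
 *	dma_set_pte_readable(pte);
 *	dma_set_pte_writable(pte);
 *	dma_set_pte_pfn(pte, N);
 *
 * leaving pte->val == (N << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE,
 * per the bit layout documented above struct dma_pte.
 */
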
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

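/*
 * Options combine with commas on the kernel command line; for example,
 * booting with "intel_iommu=on,strict" enables the IOMMU and disables
 * batched IOTLB flushing.
 */
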
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

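/*
 * Worked example (assuming hardware that only supports 4-level tables,
 * i.e. cap_sagaw() == 0x4): with max_gaw = 48 the loop starts at
 * width_to_agaw(48) = 2, finds bit 2 set in sagaw, and returns agaw 2.
 */
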
/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

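/*
 * Example: with two active IOMMUs whose cap_super_page_val() values are
 * 0x3 (2MiB and 1GiB) and 0x1 (2MiB only), mask ends up as 0x1 and
 * fls(0x1) = 1, i.e. the domain is limited to 2MiB superpages (level 1
 * in the iommu_superpage encoding above).
 */
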
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}


/* return address's pte at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra safe; it looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

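/*
 * Example: flushing pages = 3 gives mask = ilog2(roundup_pow_of_two(3))
 * = 2, i.e. a PSI covering 4 pages naturally aligned at addr. If that
 * mask exceeds the hardware's maximum address mask (cap_max_amask_val),
 * the function above falls back to a domain-selective flush instead.
 */
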
mark grossf8bab732008-02-08 04:18:38 -08001186static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1187{
1188 u32 pmen;
1189 unsigned long flags;
1190
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001191 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001192 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1193 pmen &= ~DMA_PMEN_EPM;
1194 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1195
1196 /* wait for the protected region status bit to clear */
1197 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1198 readl, !(pmen & DMA_PMEN_PRS), pmen);
1199
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001200 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001201}
1202
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001203static int iommu_enable_translation(struct intel_iommu *iommu)
1204{
1205 u32 sts;
1206 unsigned long flags;
1207
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001208 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001209 iommu->gcmd |= DMA_GCMD_TE;
1210 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001211
1212 /* Make sure hardware complete it */
1213 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001214 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001215
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001216 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001217 return 0;
1218}
1219
1220static int iommu_disable_translation(struct intel_iommu *iommu)
1221{
1222 u32 sts;
1223 unsigned long flag;
1224
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001225 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001226 iommu->gcmd &= ~DMA_GCMD_TE;
1227 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1228
1229 /* Make sure hardware complete it */
1230 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001231 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001232
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001233 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001234 return 0;
1235}
1236
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001237
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001238static int iommu_init_domains(struct intel_iommu *iommu)
1239{
1240 unsigned long ndomains;
1241 unsigned long nlongs;
1242
1243 ndomains = cap_ndoms(iommu->cap);
Yinghai Lu680a7522010-04-08 19:58:23 +01001244 pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
1245 ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001246 nlongs = BITS_TO_LONGS(ndomains);
1247
Donald Dutile94a91b52009-08-20 16:51:34 -04001248 spin_lock_init(&iommu->lock);
1249
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001250 /* TBD: there might be 64K domains,
1251 * consider other allocation for future chip
1252 */
1253 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1254 if (!iommu->domain_ids) {
1255 printk(KERN_ERR "Allocating domain id array failed\n");
1256 return -ENOMEM;
1257 }
1258 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1259 GFP_KERNEL);
1260 if (!iommu->domains) {
1261 printk(KERN_ERR "Allocating domain array failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001262 return -ENOMEM;
1263 }
1264
1265 /*
1266 * if Caching mode is set, then invalid translations are tagged
1267 * with domainid 0. Hence we need to pre-allocate it.
1268 */
1269 if (cap_caching_mode(iommu->cap))
1270 set_bit(0, iommu->domain_ids);
1271 return 0;
1272}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001273
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001274
1275static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001276static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001277
1278void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001279{
1280 struct dmar_domain *domain;
1281 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001282 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283
Donald Dutile94a91b52009-08-20 16:51:34 -04001284 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001285 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001286 domain = iommu->domains[i];
1287 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001288
Donald Dutile94a91b52009-08-20 16:51:34 -04001289 spin_lock_irqsave(&domain->iommu_lock, flags);
1290 if (--domain->iommu_count == 0) {
1291 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1292 vm_domain_exit(domain);
1293 else
1294 domain_exit(domain);
1295 }
1296 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001297 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001298 }
1299
1300 if (iommu->gcmd & DMA_GCMD_TE)
1301 iommu_disable_translation(iommu);
1302
1303 if (iommu->irq) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001304 irq_set_handler_data(iommu->irq, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305 /* This will mask the irq */
1306 free_irq(iommu->irq, iommu);
1307 destroy_irq(iommu->irq);
1308 }
1309
1310 kfree(iommu->domains);
1311 kfree(iommu->domain_ids);
1312
Weidong Hand9630fe2008-12-08 11:06:32 +08001313 g_iommus[iommu->seq_id] = NULL;
1314
1315 /* if all iommus are freed, free g_iommus */
1316 for (i = 0; i < g_num_of_iommus; i++) {
1317 if (g_iommus[i])
1318 break;
1319 }
1320
1321 if (i == g_num_of_iommus)
1322 kfree(g_iommus);
1323
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001324 /* free context mapping */
1325 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001326}
1327
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001328static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001330 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001331
1332 domain = alloc_domain_mem();
1333 if (!domain)
1334 return NULL;
1335
Suresh Siddha4c923d42009-10-02 11:01:24 -07001336 domain->nid = -1;
Weidong Han8c11e792008-12-08 15:29:22 +08001337 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
Weidong Hand71a2f32008-12-07 21:13:41 +08001338 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339
1340 return domain;
1341}
1342
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001343static int iommu_attach_domain(struct dmar_domain *domain,
1344 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001346 int num;
1347 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348 unsigned long flags;
1349
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001350 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001351
1352 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001353
1354 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1355 if (num >= ndomains) {
1356 spin_unlock_irqrestore(&iommu->lock, flags);
1357 printk(KERN_ERR "IOMMU: no free domain ids\n");
1358 return -ENOMEM;
1359 }
1360
1361 domain->id = num;
1362 set_bit(num, iommu->domain_ids);
1363 set_bit(iommu->seq_id, &domain->iommu_bmp);
1364 iommu->domains[num] = domain;
1365 spin_unlock_irqrestore(&iommu->lock, flags);
1366
1367 return 0;
1368}
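
/*
 * A minimal sketch of the id-allocation pattern above, kept
 * self-contained for illustration only (the helper name and the plain
 * bitmap/length parameters are assumptions, not driver API): scan the
 * domain-id bitmap for the first free slot and claim it while the
 * caller holds iommu->lock.
 */
static int example_claim_domain_id(unsigned long *ids, unsigned long ndomains)
{
	unsigned long num = find_first_zero_bit(ids, ndomains);

	if (num >= ndomains)
		return -ENOMEM;		/* all cap_ndoms() ids in use */
	set_bit(num, ids);		/* claim the id */
	return num;
}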
1369
1370static void iommu_detach_domain(struct dmar_domain *domain,
1371 struct intel_iommu *iommu)
1372{
1373 unsigned long flags;
1374 int num, ndomains;
1375 int found = 0;
1376
1377 spin_lock_irqsave(&iommu->lock, flags);
1378 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001379 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001380 if (iommu->domains[num] == domain) {
1381 found = 1;
1382 break;
1383 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001384 }
1385
1386 if (found) {
1387 clear_bit(num, iommu->domain_ids);
1388 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1389 iommu->domains[num] = NULL;
1390 }
Weidong Han8c11e792008-12-08 15:29:22 +08001391 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001392}
1393
1394static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001395static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001396
Joseph Cihula51a63e62011-03-21 11:04:24 -07001397static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001398{
1399 struct pci_dev *pdev = NULL;
1400 struct iova *iova;
1401 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402
David Millerf6611972008-02-06 01:36:23 -08001403 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001404
Mark Gross8a443df2008-03-04 14:59:31 -08001405 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1406 &reserved_rbtree_key);
1407
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408 /* IOAPIC ranges shouldn't be accessed by DMA */
1409 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1410 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001411 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001413 return -ENODEV;
1414 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001415
1416 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1417 for_each_pci_dev(pdev) {
1418 struct resource *r;
1419
1420 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1421 r = &pdev->resource[i];
1422 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1423 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001424 iova = reserve_iova(&reserved_iova_list,
1425 IOVA_PFN(r->start),
1426 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001427 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001429 return -ENODEV;
1430 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001431 }
1432 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001433 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434}
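
/*
 * Worked example (illustrative, assuming the usual 4KiB pages) of what
 * the IOAPIC reservation above removes from the allocatable space:
 * IOVA_PFN(0xfee00000) = 0xfee00 and IOVA_PFN(0xfeefffff) = 0xfeeff,
 * so the 256 page frames covering the interrupt-delivery window can
 * never be returned by a later alloc_iova() call.
 */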
1435
1436static void domain_reserve_special_ranges(struct dmar_domain *domain)
1437{
1438 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1439}
1440
1441static inline int guestwidth_to_adjustwidth(int gaw)
1442{
1443 int agaw;
1444 int r = (gaw - 12) % 9;
1445
1446 if (r == 0)
1447 agaw = gaw;
1448 else
1449 agaw = gaw + 9 - r;
1450 if (agaw > 64)
1451 agaw = 64;
1452 return agaw;
1453}
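
/*
 * Worked examples for guestwidth_to_adjustwidth() (illustrative): the
 * rounding makes (agaw - 12) a multiple of 9, i.e. a whole number of
 * 9-bit page-table levels on top of the 12-bit page offset, capped at
 * 64 bits:
 *
 *	gaw = 48: (48 - 12) % 9 == 0  ->  agaw = 48	(4 levels)
 *	gaw = 39: (39 - 12) % 9 == 0  ->  agaw = 39	(3 levels)
 *	gaw = 32: (32 - 12) % 9 == 2  ->  agaw = 32 + 9 - 2 = 39
 */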
1454
1455static int domain_init(struct dmar_domain *domain, int guest_width)
1456{
1457 struct intel_iommu *iommu;
1458 int adjust_width, agaw;
1459 unsigned long sagaw;
1460
David Millerf6611972008-02-06 01:36:23 -08001461 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001462 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001463
1464 domain_reserve_special_ranges(domain);
1465
1466 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001467 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001468 if (guest_width > cap_mgaw(iommu->cap))
1469 guest_width = cap_mgaw(iommu->cap);
1470 domain->gaw = guest_width;
1471 adjust_width = guestwidth_to_adjustwidth(guest_width);
1472 agaw = width_to_agaw(adjust_width);
1473 sagaw = cap_sagaw(iommu->cap);
1474 if (!test_bit(agaw, &sagaw)) {
1475 /* hardware doesn't support it, choose a bigger one */
1476 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1477 agaw = find_next_bit(&sagaw, 5, agaw);
1478 if (agaw >= 5)
1479 return -ENODEV;
1480 }
1481 domain->agaw = agaw;
1482 INIT_LIST_HEAD(&domain->devices);
1483
Weidong Han8e6040972008-12-08 15:49:06 +08001484 if (ecap_coherent(iommu->ecap))
1485 domain->iommu_coherency = 1;
1486 else
1487 domain->iommu_coherency = 0;
1488
Sheng Yang58c610b2009-03-18 15:33:05 +08001489 if (ecap_sc_support(iommu->ecap))
1490 domain->iommu_snooping = 1;
1491 else
1492 domain->iommu_snooping = 0;
1493
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001494 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001495 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001496 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001497
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001499 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500 if (!domain->pgd)
1501 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001502 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503 return 0;
1504}
1505
1506static void domain_exit(struct dmar_domain *domain)
1507{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001508 struct dmar_drhd_unit *drhd;
1509 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001510
1511	/* Domain 0 is reserved, so don't process it */
1512 if (!domain)
1513 return;
1514
Alex Williamson7b668352011-05-24 12:02:41 +01001515 /* Flush any lazy unmaps that may reference this domain */
1516 if (!intel_iommu_strict)
1517 flush_unmaps_timeout(0);
1518
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519 domain_remove_dev_info(domain);
1520 /* destroy iovas */
1521 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522
1523 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001524 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525
1526 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001527 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001528
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001529 for_each_active_iommu(iommu, drhd)
1530 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1531 iommu_detach_domain(domain, iommu);
1532
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001533 free_domain_mem(domain);
1534}
1535
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001536static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1537 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538{
1539 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001541 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001542 struct dma_pte *pgd;
1543 unsigned long num;
1544 unsigned long ndomains;
1545 int id;
1546 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001547 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001548
1549 pr_debug("Set context mapping for %02x:%02x.%d\n",
1550 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001551
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001552 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001553 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1554 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001555
David Woodhouse276dbf992009-04-04 01:45:37 +01001556 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001557 if (!iommu)
1558 return -ENODEV;
1559
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560 context = device_to_context_entry(iommu, bus, devfn);
1561 if (!context)
1562 return -ENOMEM;
1563 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001564 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565 spin_unlock_irqrestore(&iommu->lock, flags);
1566 return 0;
1567 }
1568
Weidong Hanea6606b2008-12-08 23:08:15 +08001569 id = domain->id;
1570 pgd = domain->pgd;
1571
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001572 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1573 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001574 int found = 0;
1575
1576 /* find an available domain id for this device in iommu */
1577 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001578 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001579 if (iommu->domains[num] == domain) {
1580 id = num;
1581 found = 1;
1582 break;
1583 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001584 }
1585
1586 if (found == 0) {
1587 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1588 if (num >= ndomains) {
1589 spin_unlock_irqrestore(&iommu->lock, flags);
1590 printk(KERN_ERR "IOMMU: no free domain ids\n");
1591 return -EFAULT;
1592 }
1593
1594 set_bit(num, iommu->domain_ids);
1595 iommu->domains[num] = domain;
1596 id = num;
1597 }
1598
1599			/* Skip top levels of page tables for an
1600			 * iommu whose agaw is less than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001601 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001602 */
Chris Wright1672af12009-12-02 12:06:34 -08001603 if (translation != CONTEXT_TT_PASS_THROUGH) {
1604 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1605 pgd = phys_to_virt(dma_pte_addr(pgd));
1606 if (!dma_pte_present(pgd)) {
1607 spin_unlock_irqrestore(&iommu->lock, flags);
1608 return -ENOMEM;
1609 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001610 }
1611 }
1612 }
1613
1614 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001615
Yu Zhao93a23a72009-05-18 13:51:37 +08001616 if (translation != CONTEXT_TT_PASS_THROUGH) {
1617 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1618 translation = info ? CONTEXT_TT_DEV_IOTLB :
1619 CONTEXT_TT_MULTI_LEVEL;
1620 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001621 /*
1622 * In pass through mode, AW must be programmed to indicate the largest
1623 * AGAW value supported by hardware. And ASR is ignored by hardware.
1624 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001625 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001626 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001627 else {
1628 context_set_address_root(context, virt_to_phys(pgd));
1629 context_set_address_width(context, iommu->agaw);
1630 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001631
1632 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001633 context_set_fault_enable(context);
1634 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001635 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001637 /*
1638 * It's a non-present to present mapping. If hardware doesn't cache
1639	 * non-present entries we only need to flush the write-buffer. If the
1640	 * hardware _does_ cache non-present entries, then it does so in the special
1641 * domain #0, which we have to flush:
1642 */
1643 if (cap_caching_mode(iommu->cap)) {
1644 iommu->flush.flush_context(iommu, 0,
1645 (((u16)bus) << 8) | devfn,
1646 DMA_CCMD_MASK_NOBIT,
1647 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001648 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001649 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001650 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001651 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001652 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001653 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001654
1655 spin_lock_irqsave(&domain->iommu_lock, flags);
1656 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1657 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001658 if (domain->iommu_count == 1)
1659 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001660 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001661 }
1662 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001663 return 0;
1664}
1665
1666static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001667domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1668 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001669{
1670 int ret;
1671 struct pci_dev *tmp, *parent;
1672
David Woodhouse276dbf992009-04-04 01:45:37 +01001673 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001674 pdev->bus->number, pdev->devfn,
1675 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676 if (ret)
1677 return ret;
1678
1679 /* dependent device mapping */
1680 tmp = pci_find_upstream_pcie_bridge(pdev);
1681 if (!tmp)
1682 return 0;
1683 /* Secondary interface's bus number and devfn 0 */
1684 parent = pdev->bus->self;
1685 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001686 ret = domain_context_mapping_one(domain,
1687 pci_domain_nr(parent->bus),
1688 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001689 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001690 if (ret)
1691 return ret;
1692 parent = parent->bus->self;
1693 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001694 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001695 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001696 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001697 tmp->subordinate->number, 0,
1698 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 else /* this is a legacy PCI bridge */
1700 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001701 pci_domain_nr(tmp->bus),
1702 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001703 tmp->devfn,
1704 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705}
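
/*
 * A sketch of the walk above, reduced to a hypothetical example_dev
 * type so it stays self-contained (none of these names are driver
 * API): the device itself is mapped first, then every bridge up to the
 * topmost PCIe-to-PCI bridge, because requests from devices behind
 * such a bridge arrive tagged with the bridge's source-id.
 */
struct example_dev {
	struct example_dev *parent;	/* upstream bridge, NULL at root */
	u8 bus, devfn;
};

static int example_map_device_and_bridges(struct example_dev *dev,
					  struct example_dev *top_bridge,
					  int (*map_one)(u8 bus, u8 devfn))
{
	int ret = map_one(dev->bus, dev->devfn);

	while (!ret && dev != top_bridge && dev->parent) {
		dev = dev->parent;
		ret = map_one(dev->bus, dev->devfn);
	}
	return ret;
}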
1706
Weidong Han5331fe62008-12-08 23:00:00 +08001707static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001708{
1709 int ret;
1710 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001711 struct intel_iommu *iommu;
1712
David Woodhouse276dbf992009-04-04 01:45:37 +01001713 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1714 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001715 if (!iommu)
1716 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717
David Woodhouse276dbf992009-04-04 01:45:37 +01001718 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001719 if (!ret)
1720 return ret;
1721 /* dependent device mapping */
1722 tmp = pci_find_upstream_pcie_bridge(pdev);
1723 if (!tmp)
1724 return ret;
1725 /* Secondary interface's bus number and devfn 0 */
1726 parent = pdev->bus->self;
1727 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001728 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001729 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730 if (!ret)
1731 return ret;
1732 parent = parent->bus->self;
1733 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001734 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001735 return device_context_mapped(iommu, tmp->subordinate->number,
1736 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001737 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001738 return device_context_mapped(iommu, tmp->bus->number,
1739 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001740}
1741
Fenghua Yuf5329592009-08-04 15:09:37 -07001742/* Returns the number of VTD pages, but aligned to the MM page size */
1743static inline unsigned long aligned_nrpages(unsigned long host_addr,
1744 size_t size)
1745{
1746 host_addr &= ~PAGE_MASK;
1747 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1748}
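
/*
 * Worked example (illustrative, assuming 4KiB pages on both sides):
 * host_addr = 0x1800 and size = 0x1000 leave an in-page offset of
 * 0x800, and PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT == 2, so a
 * one-page buffer that straddles a page boundary needs two VTD pages.
 */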
1749
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001750/* Return largest possible superpage level for a given mapping */
1751static inline int hardware_largepage_caps(struct dmar_domain *domain,
1752 unsigned long iov_pfn,
1753 unsigned long phy_pfn,
1754 unsigned long pages)
1755{
1756 int support, level = 1;
1757 unsigned long pfnmerge;
1758
1759 support = domain->iommu_superpage;
1760
1761 /* To use a large page, the virtual *and* physical addresses
1762 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1763 of them will mean we have to use smaller pages. So just
1764 merge them and check both at once. */
1765 pfnmerge = iov_pfn | phy_pfn;
1766
1767 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1768 pages >>= VTD_STRIDE_SHIFT;
1769 if (!pages)
1770 break;
1771 pfnmerge >>= VTD_STRIDE_SHIFT;
1772 level++;
1773 support--;
1774 }
1775 return level;
1776}
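
/*
 * Worked example for hardware_largepage_caps() (illustrative; the
 * values are assumptions): with iommu_superpage = 2, iov_pfn = 0x80000,
 * phy_pfn = 0x40000 and pages = 0x40000 (1GiB), the merged pfn 0xc0000
 * has its low 9 bits clear on both passes, so the loop runs twice and
 * returns level 3, letting __domain_mapping() use one 1GiB superpage.
 */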
1777
David Woodhouse9051aa02009-06-29 12:30:54 +01001778static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1779 struct scatterlist *sg, unsigned long phys_pfn,
1780 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001781{
1782 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001783 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001784 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001785 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001786 unsigned int largepage_lvl = 0;
1787 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001788
1789 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1790
1791 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1792 return -EINVAL;
1793
1794 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1795
David Woodhouse9051aa02009-06-29 12:30:54 +01001796 if (sg)
1797 sg_res = 0;
1798 else {
1799 sg_res = nr_pages + 1;
1800 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1801 }
1802
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001803 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001804 uint64_t tmp;
1805
David Woodhousee1605492009-06-29 11:17:38 +01001806 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001807 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001808 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1809 sg->dma_length = sg->length;
1810 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001811 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001812 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001813
David Woodhousee1605492009-06-29 11:17:38 +01001814 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001815 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1816
1817 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001818 if (!pte)
1819 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001820			/* It is a large page */
1821 if (largepage_lvl > 1)
1822 pteval |= DMA_PTE_LARGE_PAGE;
1823 else
1824 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1825
David Woodhousee1605492009-06-29 11:17:38 +01001826 }
1827		/* We don't need a lock here; nobody else
1828		 * touches the iova range
1829 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001830 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001831 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001832 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001833 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1834 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001835 if (dumps) {
1836 dumps--;
1837 debug_dma_dump_mappings(NULL);
1838 }
1839 WARN_ON(1);
1840 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001841
1842 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1843
1844 BUG_ON(nr_pages < lvl_pages);
1845 BUG_ON(sg_res < lvl_pages);
1846
1847 nr_pages -= lvl_pages;
1848 iov_pfn += lvl_pages;
1849 phys_pfn += lvl_pages;
1850 pteval += lvl_pages * VTD_PAGE_SIZE;
1851 sg_res -= lvl_pages;
1852
1853 /* If the next PTE would be the first in a new page, then we
1854 need to flush the cache on the entries we've just written.
1855 And then we'll need to recalculate 'pte', so clear it and
1856 let it get set again in the if (!pte) block above.
1857
1858 If we're done (!nr_pages) we need to flush the cache too.
1859
1860 Also if we've been setting superpages, we may need to
1861 recalculate 'pte' and switch back to smaller pages for the
1862 end of the mapping, if the trailing size is not enough to
1863 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001864 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001865 if (!nr_pages || first_pte_in_page(pte) ||
1866 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001867 domain_flush_cache(domain, first_pte,
1868 (void *)pte - (void *)first_pte);
1869 pte = NULL;
1870 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001871
1872 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001873 sg = sg_next(sg);
1874 }
1875 return 0;
1876}
1877
David Woodhouse9051aa02009-06-29 12:30:54 +01001878static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1879 struct scatterlist *sg, unsigned long nr_pages,
1880 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001881{
David Woodhouse9051aa02009-06-29 12:30:54 +01001882 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1883}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001884
David Woodhouse9051aa02009-06-29 12:30:54 +01001885static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1886 unsigned long phys_pfn, unsigned long nr_pages,
1887 int prot)
1888{
1889 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001890}
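
/*
 * Usage sketch (illustrative; the sizes are assumptions): identity-map
 * the first 16MiB, i.e. 0x1000 VTD pages, into a domain:
 *
 *	ret = domain_pfn_mapping(domain, 0, 0, 0x1000,
 *				 DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * __domain_mapping() then fills in the 4096 leaf PTEs, or far fewer if
 * hardware_largepage_caps() lets it use 2MiB superpages.
 */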
1891
Weidong Hanc7151a82008-12-08 22:51:37 +08001892static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001893{
Weidong Hanc7151a82008-12-08 22:51:37 +08001894 if (!iommu)
1895 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001896
1897 clear_context_table(iommu, bus, devfn);
1898 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001899 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001900 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001901}
1902
1903static void domain_remove_dev_info(struct dmar_domain *domain)
1904{
1905 struct device_domain_info *info;
1906 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001907 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001908
1909 spin_lock_irqsave(&device_domain_lock, flags);
1910 while (!list_empty(&domain->devices)) {
1911 info = list_entry(domain->devices.next,
1912 struct device_domain_info, link);
1913 list_del(&info->link);
1914 list_del(&info->global);
1915 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001916 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917 spin_unlock_irqrestore(&device_domain_lock, flags);
1918
Yu Zhao93a23a72009-05-18 13:51:37 +08001919 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001920 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001921 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001922 free_devinfo_mem(info);
1923
1924 spin_lock_irqsave(&device_domain_lock, flags);
1925 }
1926 spin_unlock_irqrestore(&device_domain_lock, flags);
1927}
1928
1929/*
1930 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001931 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001932 */
Kay, Allen M38717942008-09-09 18:37:29 +03001933static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001934find_domain(struct pci_dev *pdev)
1935{
1936 struct device_domain_info *info;
1937
1938 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001939 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001940 if (info)
1941 return info->domain;
1942 return NULL;
1943}
1944
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945/* domain is initialized */
1946static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1947{
1948 struct dmar_domain *domain, *found = NULL;
1949 struct intel_iommu *iommu;
1950 struct dmar_drhd_unit *drhd;
1951 struct device_domain_info *info, *tmp;
1952 struct pci_dev *dev_tmp;
1953 unsigned long flags;
1954 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001955 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001956 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001957
1958 domain = find_domain(pdev);
1959 if (domain)
1960 return domain;
1961
David Woodhouse276dbf992009-04-04 01:45:37 +01001962 segment = pci_domain_nr(pdev->bus);
1963
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001964 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1965 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001966 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967 bus = dev_tmp->subordinate->number;
1968 devfn = 0;
1969 } else {
1970 bus = dev_tmp->bus->number;
1971 devfn = dev_tmp->devfn;
1972 }
1973 spin_lock_irqsave(&device_domain_lock, flags);
1974 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001975 if (info->segment == segment &&
1976 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001977 found = info->domain;
1978 break;
1979 }
1980 }
1981 spin_unlock_irqrestore(&device_domain_lock, flags);
1982		/* pcie-pci bridge already has a domain, use it */
1983 if (found) {
1984 domain = found;
1985 goto found_domain;
1986 }
1987 }
1988
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001989 domain = alloc_domain();
1990 if (!domain)
1991 goto error;
1992
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001993 /* Allocate new domain for the device */
1994 drhd = dmar_find_matched_drhd_unit(pdev);
1995 if (!drhd) {
1996 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1997 pci_name(pdev));
1998 return NULL;
1999 }
2000 iommu = drhd->iommu;
2001
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002002 ret = iommu_attach_domain(domain, iommu);
2003 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002004 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002005 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002006 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002007
2008 if (domain_init(domain, gaw)) {
2009 domain_exit(domain);
2010 goto error;
2011 }
2012
2013 /* register pcie-to-pci device */
2014 if (dev_tmp) {
2015 info = alloc_devinfo_mem();
2016 if (!info) {
2017 domain_exit(domain);
2018 goto error;
2019 }
David Woodhouse276dbf992009-04-04 01:45:37 +01002020 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002021 info->bus = bus;
2022 info->devfn = devfn;
2023 info->dev = NULL;
2024 info->domain = domain;
2025		/* This domain is shared by devices under the p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002026 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002027
2028		/* pcie-to-pci bridge already has a domain, use it */
2029 found = NULL;
2030 spin_lock_irqsave(&device_domain_lock, flags);
2031 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002032 if (tmp->segment == segment &&
2033 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002034 found = tmp->domain;
2035 break;
2036 }
2037 }
2038 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002039 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002040 free_devinfo_mem(info);
2041 domain_exit(domain);
2042 domain = found;
2043 } else {
2044 list_add(&info->link, &domain->devices);
2045 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002046 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002047 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002048 }
2049
2050found_domain:
2051 info = alloc_devinfo_mem();
2052 if (!info)
2053 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002054 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002055 info->bus = pdev->bus->number;
2056 info->devfn = pdev->devfn;
2057 info->dev = pdev;
2058 info->domain = domain;
2059 spin_lock_irqsave(&device_domain_lock, flags);
2060	/* somebody else was faster and already set it */
2061 found = find_domain(pdev);
2062 if (found != NULL) {
2063 spin_unlock_irqrestore(&device_domain_lock, flags);
2064 if (found != domain) {
2065 domain_exit(domain);
2066 domain = found;
2067 }
2068 free_devinfo_mem(info);
2069 return domain;
2070 }
2071 list_add(&info->link, &domain->devices);
2072 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002073 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002074 spin_unlock_irqrestore(&device_domain_lock, flags);
2075 return domain;
2076error:
2077 /* recheck it here, maybe others set it */
2078 return find_domain(pdev);
2079}
2080
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002081static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002082#define IDENTMAP_ALL 1
2083#define IDENTMAP_GFX 2
2084#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002085
David Woodhouseb2132032009-06-26 18:50:28 +01002086static int iommu_domain_identity_map(struct dmar_domain *domain,
2087 unsigned long long start,
2088 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002089{
David Woodhousec5395d52009-06-28 16:35:56 +01002090 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2091 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002092
David Woodhousec5395d52009-06-28 16:35:56 +01002093 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2094 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002095 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002096 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 }
2098
David Woodhousec5395d52009-06-28 16:35:56 +01002099 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2100 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002101 /*
2102 * RMRR range might have overlap with physical memory range,
2103 * clear it first
2104 */
David Woodhousec5395d52009-06-28 16:35:56 +01002105 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002106
David Woodhousec5395d52009-06-28 16:35:56 +01002107 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2108 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002109 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002110}
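
/*
 * Worked example (illustrative; the RMRR range is an assumed value): a
 * BIOS-reported RMRR of 0xdd000000-0xdd7fffff yields
 * first_vpfn = 0xdd000 and last_vpfn = 0xdd7ff, so 0x800 page frames
 * are reserved in the iova tree and then mapped 1:1, keeping the
 * device's BIOS-programmed DMA addresses valid under translation.
 */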
2111
2112static int iommu_prepare_identity_map(struct pci_dev *pdev,
2113 unsigned long long start,
2114 unsigned long long end)
2115{
2116 struct dmar_domain *domain;
2117 int ret;
2118
David Woodhousec7ab48d2009-06-26 19:10:36 +01002119 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002120 if (!domain)
2121 return -ENOMEM;
2122
David Woodhouse19943b02009-08-04 16:19:20 +01002123 /* For _hardware_ passthrough, don't bother. But for software
2124 passthrough, we do it anyway -- it may indicate a memory
2125	   range which is reserved in E820, and so didn't get set
2126 up to start with in si_domain */
2127 if (domain == si_domain && hw_pass_through) {
2128 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2129 pci_name(pdev), start, end);
2130 return 0;
2131 }
2132
2133 printk(KERN_INFO
2134 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2135 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002136
David Woodhouse5595b522009-12-02 09:21:55 +00002137 if (end < start) {
2138 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2139 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2140 dmi_get_system_info(DMI_BIOS_VENDOR),
2141 dmi_get_system_info(DMI_BIOS_VERSION),
2142 dmi_get_system_info(DMI_PRODUCT_VERSION));
2143 ret = -EIO;
2144 goto error;
2145 }
2146
David Woodhouse2ff729f2009-08-26 14:25:41 +01002147 if (end >> agaw_to_width(domain->agaw)) {
2148 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2149 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2150 agaw_to_width(domain->agaw),
2151 dmi_get_system_info(DMI_BIOS_VENDOR),
2152 dmi_get_system_info(DMI_BIOS_VERSION),
2153 dmi_get_system_info(DMI_PRODUCT_VERSION));
2154 ret = -EIO;
2155 goto error;
2156 }
David Woodhouse19943b02009-08-04 16:19:20 +01002157
David Woodhouseb2132032009-06-26 18:50:28 +01002158 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002159 if (ret)
2160 goto error;
2161
2162 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002163 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002164 if (ret)
2165 goto error;
2166
2167 return 0;
2168
2169 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002170 domain_exit(domain);
2171 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172}
2173
2174static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2175 struct pci_dev *pdev)
2176{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002177 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002178 return 0;
2179 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002180 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002181}
2182
Suresh Siddhad3f13812011-08-23 17:05:25 -07002183#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002184static inline void iommu_prepare_isa(void)
2185{
2186 struct pci_dev *pdev;
2187 int ret;
2188
2189 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2190 if (!pdev)
2191 return;
2192
David Woodhousec7ab48d2009-06-26 19:10:36 +01002193 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002194 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002195
2196 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002197 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2198 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002199
2200}
2201#else
2202static inline void iommu_prepare_isa(void)
2203{
2204 return;
2205}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002206#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002207
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002208static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002209
Matt Kraai071e1372009-08-23 22:30:22 -07002210static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002211{
2212 struct dmar_drhd_unit *drhd;
2213 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002214 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002215
2216 si_domain = alloc_domain();
2217 if (!si_domain)
2218 return -EFAULT;
2219
David Woodhousec7ab48d2009-06-26 19:10:36 +01002220 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002221
2222 for_each_active_iommu(iommu, drhd) {
2223 ret = iommu_attach_domain(si_domain, iommu);
2224 if (ret) {
2225 domain_exit(si_domain);
2226 return -EFAULT;
2227 }
2228 }
2229
2230 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2231 domain_exit(si_domain);
2232 return -EFAULT;
2233 }
2234
2235 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2236
David Woodhouse19943b02009-08-04 16:19:20 +01002237 if (hw)
2238 return 0;
2239
David Woodhousec7ab48d2009-06-26 19:10:36 +01002240 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002241 unsigned long start_pfn, end_pfn;
2242 int i;
2243
2244 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2245 ret = iommu_domain_identity_map(si_domain,
2246 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2247 if (ret)
2248 return ret;
2249 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002250 }
2251
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002252 return 0;
2253}
2254
2255static void domain_remove_one_dev_info(struct dmar_domain *domain,
2256 struct pci_dev *pdev);
2257static int identity_mapping(struct pci_dev *pdev)
2258{
2259 struct device_domain_info *info;
2260
2261 if (likely(!iommu_identity_mapping))
2262 return 0;
2263
Mike Traviscb452a42011-05-28 13:15:03 -05002264 info = pdev->dev.archdata.iommu;
2265 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2266 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002267
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002268 return 0;
2269}
2270
2271static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002272 struct pci_dev *pdev,
2273 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002274{
2275 struct device_domain_info *info;
2276 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002277 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002278
2279 info = alloc_devinfo_mem();
2280 if (!info)
2281 return -ENOMEM;
2282
David Woodhouse5fe60f42009-08-09 10:53:41 +01002283 ret = domain_context_mapping(domain, pdev, translation);
2284 if (ret) {
2285 free_devinfo_mem(info);
2286 return ret;
2287 }
2288
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002289 info->segment = pci_domain_nr(pdev->bus);
2290 info->bus = pdev->bus->number;
2291 info->devfn = pdev->devfn;
2292 info->dev = pdev;
2293 info->domain = domain;
2294
2295 spin_lock_irqsave(&device_domain_lock, flags);
2296 list_add(&info->link, &domain->devices);
2297 list_add(&info->global, &device_domain_list);
2298 pdev->dev.archdata.iommu = info;
2299 spin_unlock_irqrestore(&device_domain_lock, flags);
2300
2301 return 0;
2302}
2303
David Woodhouse6941af22009-07-04 18:24:27 +01002304static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2305{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002306 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2307 return 1;
2308
2309 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2310 return 1;
2311
2312 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2313 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002314
David Woodhouse3dfc8132009-07-04 19:11:08 +01002315 /*
2316 * We want to start off with all devices in the 1:1 domain, and
2317 * take them out later if we find they can't access all of memory.
2318 *
2319 * However, we can't do this for PCI devices behind bridges,
2320 * because all PCI devices behind the same bridge will end up
2321 * with the same source-id on their transactions.
2322 *
2323 * Practically speaking, we can't change things around for these
2324 * devices at run-time, because we can't be sure there'll be no
2325 * DMA transactions in flight for any of their siblings.
2326 *
2327 * So PCI devices (unless they're on the root bus) as well as
2328 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2329 * the 1:1 domain, just in _case_ one of their siblings turns out
2330 * not to be able to map all of memory.
2331 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002332 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002333 if (!pci_is_root_bus(pdev->bus))
2334 return 0;
2335 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2336 return 0;
2337 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2338 return 0;
2339
2340 /*
2341 * At boot time, we don't yet know if devices will be 64-bit capable.
2342 * Assume that they will -- if they turn out not to be, then we can
2343 * take them out of the 1:1 domain later.
2344 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002345 if (!startup) {
2346 /*
2347 * If the device's dma_mask is less than the system's memory
2348 * size then this is not a candidate for identity mapping.
2349 */
2350 u64 dma_mask = pdev->dma_mask;
2351
2352 if (pdev->dev.coherent_dma_mask &&
2353 pdev->dev.coherent_dma_mask < dma_mask)
2354 dma_mask = pdev->dev.coherent_dma_mask;
2355
2356 return dma_mask >= dma_get_required_mask(&pdev->dev);
2357 }
David Woodhouse6941af22009-07-04 18:24:27 +01002358
2359 return 1;
2360}
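
/*
 * A sketch of the startup == 0 branch above, self-contained for
 * illustration (the helper name is an assumption, not driver API): a
 * device stays in the 1:1 domain only if its effective mask reaches
 * all the memory the identity map could hand it.
 */
static bool example_mask_covers_memory(u64 dma_mask, u64 coherent_mask,
				       u64 required_mask)
{
	if (coherent_mask && coherent_mask < dma_mask)
		dma_mask = coherent_mask;	/* stricter mask wins */
	return dma_mask >= required_mask;
}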
2361
Matt Kraai071e1372009-08-23 22:30:22 -07002362static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002363{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002364 struct pci_dev *pdev = NULL;
2365 int ret;
2366
David Woodhouse19943b02009-08-04 16:19:20 +01002367 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002368 if (ret)
2369 return -EFAULT;
2370
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002371 for_each_pci_dev(pdev) {
Mike Travis825507d2011-05-28 13:15:06 -05002372 /* Skip Host/PCI Bridge devices */
2373 if (IS_BRIDGE_HOST_DEVICE(pdev))
2374 continue;
David Woodhouse6941af22009-07-04 18:24:27 +01002375 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002376 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2377 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002378
David Woodhouse5fe60f42009-08-09 10:53:41 +01002379 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002380 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002381 CONTEXT_TT_MULTI_LEVEL);
2382 if (ret)
2383 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002384 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002385 }
2386
2387 return 0;
2388}
2389
Joseph Cihulab7792602011-05-03 00:08:37 -07002390static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002391{
2392 struct dmar_drhd_unit *drhd;
2393 struct dmar_rmrr_unit *rmrr;
2394 struct pci_dev *pdev;
2395 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002396 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002397
2398 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399 * for each drhd
2400 * allocate root
2401 * initialize and program root entry to not present
2402 * endfor
2403 */
2404 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002405 g_num_of_iommus++;
2406 /*
2407		 * lock not needed as this is only incremented in the
2408		 * single-threaded kernel __init code path; all other
2409		 * accesses are read-only
2410 */
2411 }
2412
Weidong Hand9630fe2008-12-08 11:06:32 +08002413 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2414 GFP_KERNEL);
2415 if (!g_iommus) {
2416 printk(KERN_ERR "Allocating global iommu array failed\n");
2417 ret = -ENOMEM;
2418 goto error;
2419 }
2420
mark gross80b20dd2008-04-18 13:53:58 -07002421 deferred_flush = kzalloc(g_num_of_iommus *
2422 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2423 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002424 ret = -ENOMEM;
2425 goto error;
2426 }
2427
mark gross5e0d2a62008-03-04 15:22:08 -08002428 for_each_drhd_unit(drhd) {
2429 if (drhd->ignored)
2430 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002431
2432 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002433 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002434
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002435 ret = iommu_init_domains(iommu);
2436 if (ret)
2437 goto error;
2438
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002439 /*
2440 * TBD:
2441 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002442		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002443 */
2444 ret = iommu_alloc_root_entry(iommu);
2445 if (ret) {
2446 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2447 goto error;
2448 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002449 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002450 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002451 }
2452
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002453 /*
2454	 * Start from a sane iommu hardware state.
2455 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002456 for_each_drhd_unit(drhd) {
2457 if (drhd->ignored)
2458 continue;
2459
2460 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002461
2462 /*
2463 * If the queued invalidation is already initialized by us
2464 * (for example, while enabling interrupt-remapping) then
2465 * we got the things already rolling from a sane state.
2466 */
2467 if (iommu->qi)
2468 continue;
2469
2470 /*
2471 * Clear any previous faults.
2472 */
2473 dmar_fault(-1, iommu);
2474 /*
2475 * Disable queued invalidation if supported and already enabled
2476 * before OS handover.
2477 */
2478 dmar_disable_qi(iommu);
2479 }
2480
2481 for_each_drhd_unit(drhd) {
2482 if (drhd->ignored)
2483 continue;
2484
2485 iommu = drhd->iommu;
2486
Youquan Songa77b67d2008-10-16 16:31:56 -07002487 if (dmar_enable_qi(iommu)) {
2488 /*
2489 * Queued Invalidate not enabled, use Register Based
2490 * Invalidate
2491 */
2492 iommu->flush.flush_context = __iommu_flush_context;
2493 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002494 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002495 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002496 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002497 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002498 } else {
2499 iommu->flush.flush_context = qi_flush_context;
2500 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002501 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002502 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002503 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002504 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002505 }
2506 }
2507
David Woodhouse19943b02009-08-04 16:19:20 +01002508 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002509 iommu_identity_mapping |= IDENTMAP_ALL;
2510
Suresh Siddhad3f13812011-08-23 17:05:25 -07002511#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002512 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002513#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002514
2515 check_tylersburg_isoch();
2516
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002517 /*
2518	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002519	 * identity mappings for rmrr, gfx, and isa, and possibly fall back to
2520	 * static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002521 */
David Woodhouse19943b02009-08-04 16:19:20 +01002522 if (iommu_identity_mapping) {
2523 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2524 if (ret) {
2525 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2526 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002527 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002528 }
David Woodhouse19943b02009-08-04 16:19:20 +01002529 /*
2530 * For each rmrr
2531 * for each dev attached to rmrr
2532 * do
2533 * locate drhd for dev, alloc domain for dev
2534 * allocate free domain
2535 * allocate page table entries for rmrr
2536 * if context not allocated for bus
2537 * allocate and init context
2538 * set present in root table for this bus
2539 * init context with domain, translation etc
2540 * endfor
2541 * endfor
2542 */
2543 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2544 for_each_rmrr_units(rmrr) {
2545 for (i = 0; i < rmrr->devices_cnt; i++) {
2546 pdev = rmrr->devices[i];
2547 /*
2548			 * some BIOSes list non-existent devices in the
2549			 * DMAR table.
2550 */
2551 if (!pdev)
2552 continue;
2553 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2554 if (ret)
2555 printk(KERN_ERR
2556 "IOMMU: mapping reserved region failed\n");
2557 }
2558 }
2559
2560 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002561
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002562 /*
2563 * for each drhd
2564 * enable fault log
2565 * global invalidate context cache
2566 * global invalidate iotlb
2567 * enable translation
2568 */
2569 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002570 if (drhd->ignored) {
2571 /*
2572 * we always have to disable PMRs or DMA may fail on
2573 * this device
2574 */
2575 if (force_on)
2576 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002577 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002578 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002579 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002580
2581 iommu_flush_write_buffer(iommu);
2582
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002583 ret = dmar_set_interrupt(iommu);
2584 if (ret)
2585 goto error;
2586
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002587 iommu_set_root_entry(iommu);
2588
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002589 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002590 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002591
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002592 ret = iommu_enable_translation(iommu);
2593 if (ret)
2594 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002595
2596 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002597 }
2598
2599 return 0;
2600error:
2601 for_each_drhd_unit(drhd) {
2602 if (drhd->ignored)
2603 continue;
2604 iommu = drhd->iommu;
2605 free_iommu(iommu);
2606 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002607 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002608 return ret;
2609}
2610
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002611/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002612static struct iova *intel_alloc_iova(struct device *dev,
2613 struct dmar_domain *domain,
2614 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002615{
2616 struct pci_dev *pdev = to_pci_dev(dev);
2617 struct iova *iova = NULL;
2618
David Woodhouse875764d2009-06-28 21:20:51 +01002619 /* Restrict dma_mask to the width that the iommu can handle */
2620 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2621
2622 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002623 /*
2624 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002625 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002626 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002627 */
David Woodhouse875764d2009-06-28 21:20:51 +01002628 iova = alloc_iova(&domain->iovad, nrpages,
2629 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2630 if (iova)
2631 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002632 }
David Woodhouse875764d2009-06-28 21:20:51 +01002633 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2634 if (unlikely(!iova)) {
2635 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2636 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002637 return NULL;
2638 }
2639
2640 return iova;
2641}
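
/*
 * Usage sketch (illustrative; the sizes are assumptions): a fully
 * 64-bit capable device asking for a 64KiB buffer, i.e. 16 MM pages,
 * still gets its iova from below 4GiB first:
 *
 *	iova = intel_alloc_iova(hwdev, domain, 16, DMA_BIT_MASK(64));
 *
 * Only when the sub-4GiB space is exhausted does the second
 * alloc_iova() call hand out addresses above IOVA_PFN(DMA_BIT_MASK(32)),
 * which cost dual address cycles on legacy PCI.
 */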
2642
David Woodhouse147202a2009-07-07 19:43:20 +01002643static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644{
2645 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002647
2648 domain = get_domain_for_dev(pdev,
2649 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2650 if (!domain) {
2651 printk(KERN_ERR
2652 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002653 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002654 }
2655
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002657 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002658 ret = domain_context_mapping(domain, pdev,
2659 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002660 if (ret) {
2661 printk(KERN_ERR
2662 "Domain context map for %s failed",
2663 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002664 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002665 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002666 }
2667
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002668 return domain;
2669}
2670
David Woodhouse147202a2009-07-07 19:43:20 +01002671static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2672{
2673 struct device_domain_info *info;
2674
2675 /* No lock here, assumes no domain exit in normal case */
2676 info = dev->dev.archdata.iommu;
2677 if (likely(info))
2678 return info->domain;
2679
2680 return __get_valid_domain_for_dev(dev);
2681}
2682
static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A device limited to 32 bit DMA is removed from
			 * si_domain and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * When a 64 bit DMA device is detached from a VM, the
		 * device is put back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

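/*
 * Core map path for the DMA API. Roughly: bypass translation entirely
 * for identity-mapped devices, allocate an IOVA range, write the page
 * table entries with permissions derived from the DMA direction, then
 * flush the IOTLB (caching mode, i.e. usually emulated VT-d) or just
 * the write buffer (real hardware) for the non-present -> present
 * transition.
 */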
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, so we should map
	 * the whole page. Note: if two parts of one page are separately
	 * mapped, we might have two guest_addrs mapping to the same host
	 * paddr, but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

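/*
 * Deferred ("lazy") IOTLB flushing: rather than invalidating on every
 * unmap, intel_unmap_page()/intel_unmap_sg() queue the IOVA in
 * deferred_flush[] and the whole batch is flushed here, either from a
 * 10ms timer or once HIGH_WATER_MARK entries have piled up. The window
 * in which a device may still use a stale translation is the price
 * paid for the batching.
 */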
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

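/*
 * Unmap path: with intel_iommu=strict the IOTLB invalidation and IOVA
 * release happen synchronously; otherwise both are handed to the
 * deferred-flush machinery above.
 */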
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu time otherwise used up by the iotlb
		 * flush operation.
		 */
	}
}

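/*
 * For coherent allocations the GFP zone flags are only needed when the
 * device is not translated: with translation the IOMMU can map any
 * page into the device's addressable range, so GFP_DMA/GFP_DMA32 are
 * stripped.
 */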
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu time otherwise used up by the iotlb
		 * flush operation.
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the pages */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

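/*
 * These ops back the generic DMA API for devices behind VT-d; drivers
 * never call them directly. An illustrative (not from this file) use:
 *
 *	dma_addr_t dma = dma_map_page(&pdev->dev, page, 0, len,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *
 * which lands in intel_map_page()/intel_mapping_error() below. Note
 * the error convention: a DMA address of 0 means failure, which is why
 * __intel_map_single() returns 0 on its error paths.
 */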
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

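/*
 * Mark as "ignored" any DRHD unit with no PCI devices at all, and,
 * unless gfx mapping is enabled (it is by default; intel_iommu=igfx_off
 * clears dmar_map_gfx), any unit that covers only graphics devices.
 * Devices under an ignored unit get DUMMY_DEVICE_DOMAIN_INFO and are
 * never translated.
 */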
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

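/*
 * Only the fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) need to
 * be saved by hand across suspend; the root table, context entries and
 * page tables live in memory, so init_iommu_hw() can simply re-arm the
 * hardware from them on resume.
 */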
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
						 GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

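/*
 * RMRR (Reserved Memory Region Reporting) and ATSR (Address Translation
 * Services Reporting) structures from the ACPI DMAR table are handled
 * in two steps: dmar_parse_one_rmrr()/dmar_parse_one_atsr() record the
 * headers early, and the PCI device scopes are resolved later via
 * dmar_parse_rmrr_atsr_dev(), once the PCI subsystem is available.
 */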
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}


int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * We only respond here to the action of a device being unbound from
 * its driver.
 *
 * An added device is not attached to its DMAR domain here yet. That
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

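/*
 * Boot-time entry point. The order matters: parse the DMAR table,
 * resolve device scopes, set up mempools and reserved IOVA ranges,
 * filter out ignorable DRHD units, then let init_dmars() program the
 * hardware; only after all of that succeeds are intel_dma_ops and the
 * IOMMU-API ops installed. Under a tboot launch (force_on) any
 * failure is fatal.
 */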
int __init intel_iommu_init(void)
{
	int ret = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					    info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

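/*
 * "VM domains" back the generic IOMMU API (e.g. KVM device
 * assignment). Unlike DMA-API domains they may span several IOMMUs,
 * so they carry their own id space (vm_domid below); a per-IOMMU
 * domain id is, roughly, only consumed once a device behind that
 * IOMMU is actually attached.
 */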
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

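/*
 * Rounding example for aligned_nrpages() above (illustrative only):
 * for hpa 0x1800 and size 0x1000 the mapping straddles a page
 * boundary, so aligned_nrpages(0x1800, 0x1000) yields 2 pages even
 * though the size itself is exactly one page.
 *
 * A caller of the generic API of this era would reach this function
 * with something like (sketch, error handling omitted):
 *
 *	iommu_map(domain, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *
 * where size arrives in bytes, already checked against this driver's
 * pgsize_bitmap by the IOMMU core.
 */
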
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

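/*
 * Note on the return value above: the generic iommu_unmap() of this
 * era treats it as the number of bytes actually unmapped and keeps
 * calling back until the whole requested range is gone, so returning
 * PAGE_SIZE << order for less than @size is legitimate.
 */
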
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

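/*
 * The walk above yields the page-aligned address from the leaf PTE,
 * or 0 if nothing is mapped at @iova.  A caller wanting the exact
 * physical address adds the page offset back, e.g. (sketch, valid
 * only when the page is known to be mapped):
 *
 *	phys = iommu_iova_to_phys(domain, iova) | (iova & ~VTD_PAGE_MASK);
 */
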
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}

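/*
 * Typical consumer (sketch): device assignment code can do
 *
 *	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 *
 * so that DMA_PTE_SNP is only requested from intel_iommu_map() when
 * this hardware can actually enforce snooping.
 */
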
/*
 * Group numbers are arbitrary.  Devices with the same group number
 * indicate that the iommu cannot differentiate between them.  To
 * avoid tracking used groups we just use the seg|bus|devfn of the
 * lowest level at which we're able to differentiate devices.
 */
static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge;
	union {
		struct {
			u8 devfn;
			u8 bus;
			u16 segment;
		} pci;
		u32 group;
	} id;

	if (iommu_no_mapping(dev))
		return -ENODEV;

	id.pci.segment = pci_domain_nr(pdev->bus);
	id.pci.bus = pdev->bus->number;
	id.pci.devfn = pdev->devfn;

	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge)) {
			id.pci.bus = bridge->subordinate->number;
			id.pci.devfn = 0;
		} else {
			id.pci.bus = bridge->bus->number;
			id.pci.devfn = bridge->devfn;
		}
	}

	if (!pdev->is_virtfn && iommu_group_mf)
		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);

	*groupid = id.group;

	return 0;
}

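/*
 * Layout of the group id built above (illustrative): with segment
 * 0x0000, bus 0x03, device 0, function 0, the union packs devfn into
 * byte 0, bus into byte 1 and segment into bytes 2-3, so id.group
 * reads back as 0x00000300 on a little-endian machine.  Two
 * functions of one multifunction device collapse to the same id when
 * iommu_group_mf is set, because PCI_DEVFN(PCI_SLOT(devfn), 0)
 * zeroes the function number.
 */
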
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.device_group	= intel_iommu_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

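/*
 * This ops table is what the generic iommu_*() entry points dispatch
 * through once the driver registers it with the IOMMU core (in this
 * era via bus_set_iommu() on pci_bus_type, elsewhere in this file);
 * the iommu_map()/iommu_domain_has_cap() sketches above all funnel
 * into the callbacks listed here.
 */
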
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

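/*
 * DECLARE_PCI_FIXUP_HEADER runs the quirk above while the PCI core
 * is still reading config headers during enumeration, long before
 * the DMAR code consults rwbf_quirk, so device 8086:2a40 is always
 * caught in time.
 */
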
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

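/*
 * As best understood, GGC is a graphics control register in the host
 * bridge's config space whose bits 11:8 encode how much stolen
 * memory the BIOS set aside for the GPU's GTT and whether a
 * VT-d-capable variant was chosen.  GGC_MEMORY_VT_ENABLED is bit 11,
 * so the quirk below only has to test that one bit to tell the *_VT
 * encodings (0x9-0xb) apart from the non-VT ones (0x0-0x3).
 */
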
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

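/*
 * Setting intel_iommu_strict above makes the unmap paths flush the
 * IOTLB synchronously instead of batching stale entries for a
 * deferred flush; on these Ironlake parts that trades performance
 * for never flushing while the graphics device may still be using a
 * translation.
 */
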
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
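
/*
 * Decoding note for the checks above (as best understood from the
 * code, since the register is undocumented): bit 0 of VTISOCHCTRL
 * set means Azalia DMA is routed to the non-isoch DMAR unit, and the
 * 0x1c mask keeps the TLB-entry field in place, with the masked
 * value itself treated as the entry count (0x10 == the recommended
 * 16 entries, 0 == a broken BIOS that gets the identity-map
 * workaround).
 */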