Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070030#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080035#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010037#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030038#include <linux/intel-iommu.h>
Fenghua Yuf59c7b62009-03-27 14:22:42 -070039#include <linux/sysdev.h>
Shane Wang69575d32009-09-01 18:25:07 -070040#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100041#include <linux/dmi.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070042#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090043#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070044#include "pci.h"
45
Fenghua Yu5b6985c2008-10-16 18:02:32 -070046#define ROOT_SIZE VTD_PAGE_SIZE
47#define CONTEXT_SIZE VTD_PAGE_SIZE
48
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070049#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
50#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070051#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070052
53#define IOAPIC_RANGE_START (0xfee00000)
54#define IOAPIC_RANGE_END (0xfeefffff)
55#define IOVA_START_ADDR (0x1000)
56
57#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
58
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070059#define MAX_AGAW_WIDTH 64
60
David Woodhouse2ebe3152009-09-19 07:34:04 -070061#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
62#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
63
64/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
65 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
66#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
67 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
68#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
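/*
 * Worked example (a sketch assuming VTD_PAGE_SHIFT == 12, i.e. 4KiB VT-d
 * pages): with gaw == 48, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 ==
 * 0xfffffffff and __DOMAIN_MAX_ADDR(48) == 0xffffffffffff. On a 64-bit
 * build DOMAIN_MAX_PFN(48) keeps the full 36-bit value; on a 32-bit build
 * it clamps to ULONG_MAX (0xffffffff) so PFNs still fit in unsigned long.
 */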
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070069
Mark McLoughlinf27be032008-11-20 15:49:43 +000070#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070071#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070072#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080073
David Woodhousefd18de52009-05-10 23:57:41 +010074
David Woodhousedd4e8312009-06-27 16:21:20 +010075/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
76 are never going to work. */
77static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
78{
79 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
80}
81
82static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
83{
84 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
85}
86static inline unsigned long page_to_dma_pfn(struct page *pg)
87{
88 return mm_to_dma_pfn(page_to_pfn(pg));
89}
90static inline unsigned long virt_to_dma_pfn(void *p)
91{
92 return page_to_dma_pfn(virt_to_page(p));
93}
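/*
 * Illustration (assuming the common 4KiB kernel page size): PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12, so the shifts above are by zero bits and the two
 * PFN spaces coincide. On a hypothetical 64KiB-page kernel the shift is 4,
 * so mm_to_dma_pfn(3) == 48 and dma_to_mm_pfn(48..63) all map back to 3.
 */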
94
Weidong Hand9630fe2008-12-08 11:06:32 +080095/* global iommu list, set NULL for ignored DMAR units */
96static struct intel_iommu **g_iommus;
97
David Woodhousee0fc7e02009-09-30 09:12:17 -070098static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +000099static int rwbf_quirk;
100
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000101/*
102 * 0: Present
103 * 1-11: Reserved
104 * 12-63: Context Ptr (12 - (haw-1))
105 * 64-127: Reserved
106 */
107struct root_entry {
108 u64 val;
109 u64 rsvd1;
110};
111#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
112static inline bool root_present(struct root_entry *root)
113{
114 return (root->val & 1);
115}
116static inline void set_root_present(struct root_entry *root)
117{
118 root->val |= 1;
119}
120static inline void set_root_value(struct root_entry *root, unsigned long value)
121{
122 root->val |= value & VTD_PAGE_MASK;
123}
124
125static inline struct context_entry *
126get_context_addr_from_root(struct root_entry *root)
127{
128 return (struct context_entry *)
129 (root_present(root)?phys_to_virt(
130 root->val & VTD_PAGE_MASK) :
131 NULL);
132}
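/*
 * Worked example of a root entry (addresses are illustrative only): if a
 * bus's context table lives at physical 0x3fe00000, set_root_value()
 * stores the page-aligned address and set_root_present() sets bit 0,
 * leaving val == 0x3fe00001; get_context_addr_from_root() then returns
 * phys_to_virt(0x3fe00000).
 */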
133
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000134/*
135 * low 64 bits:
136 * 0: present
137 * 1: fault processing disable
138 * 2-3: translation type
139 * 12-63: address space root
140 * high 64 bits:
141 * 0-2: address width
142 * 3-6: avail
143 * 8-23: domain id
144 */
145struct context_entry {
146 u64 lo;
147 u64 hi;
148};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000149
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000150static inline bool context_present(struct context_entry *context)
151{
152 return (context->lo & 1);
153}
154static inline void context_set_present(struct context_entry *context)
155{
156 context->lo |= 1;
157}
158
159static inline void context_set_fault_enable(struct context_entry *context)
160{
161 context->lo &= (((u64)-1) << 2) | 1;
162}
163
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000164static inline void context_set_translation_type(struct context_entry *context,
165 unsigned long value)
166{
167 context->lo &= (((u64)-1) << 4) | 3;
168 context->lo |= (value & 3) << 2;
169}
170
171static inline void context_set_address_root(struct context_entry *context,
172 unsigned long value)
173{
174 context->lo |= value & VTD_PAGE_MASK;
175}
176
177static inline void context_set_address_width(struct context_entry *context,
178 unsigned long value)
179{
180 context->hi |= value & 7;
181}
182
183static inline void context_set_domain_id(struct context_entry *context,
184 unsigned long value)
185{
186 context->hi |= (value & ((1 << 16) - 1)) << 8;
187}
188
189static inline void context_clear_entry(struct context_entry *context)
190{
191 context->lo = 0;
192 context->hi = 0;
193}
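/*
 * Worked example using the setters above (values are illustrative): for
 * domain id 5, a 4-level table (address width value 2) rooted at physical
 * 0x12340000, with multi-level translation (type 0), the entry ends up as
 * lo == 0x12340001 (root | present) and hi == 0x502 ((5 << 8) | 2).
 */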
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000194
Mark McLoughlin622ba122008-11-20 15:49:46 +0000195/*
196 * 0: readable
197 * 1: writable
198 * 2-6: reserved
199 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800200 * 8-10: available
201 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000202 * 12-63: Host physical address
203 */
204struct dma_pte {
205 u64 val;
206};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000207
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000208static inline void dma_clear_pte(struct dma_pte *pte)
209{
210 pte->val = 0;
211}
212
213static inline void dma_set_pte_readable(struct dma_pte *pte)
214{
215 pte->val |= DMA_PTE_READ;
216}
217
218static inline void dma_set_pte_writable(struct dma_pte *pte)
219{
220 pte->val |= DMA_PTE_WRITE;
221}
222
Sheng Yang9cf066972009-03-18 15:33:07 +0800223static inline void dma_set_pte_snp(struct dma_pte *pte)
224{
225 pte->val |= DMA_PTE_SNP;
226}
227
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000228static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
229{
230 pte->val = (pte->val & ~3) | (prot & 3);
231}
232
233static inline u64 dma_pte_addr(struct dma_pte *pte)
234{
David Woodhousec85994e2009-07-01 19:21:24 +0100235#ifdef CONFIG_64BIT
236 return pte->val & VTD_PAGE_MASK;
237#else
238 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100239 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100240#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000241}
242
David Woodhousedd4e8312009-06-27 16:21:20 +0100243static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000244{
David Woodhousedd4e8312009-06-27 16:21:20 +0100245 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000246}
247
248static inline bool dma_pte_present(struct dma_pte *pte)
249{
250 return (pte->val & 3) != 0;
251}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000252
David Woodhouse75e6bf92009-07-02 11:21:16 +0100253static inline int first_pte_in_page(struct dma_pte *pte)
254{
255 return !((unsigned long)pte & ~VTD_PAGE_MASK);
256}
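/*
 * Worked example (illustrative values): a leaf PTE mapping DMA pfn 0x1234
 * read/write has val == (0x1234 << VTD_PAGE_SHIFT) | DMA_PTE_READ |
 * DMA_PTE_WRITE == 0x1234003, so dma_pte_present() sees (val & 3) != 0.
 * first_pte_in_page() is true only for the PTE at index 0 of a 512-entry
 * page-table page, i.e. one whose address is VTD_PAGE_SIZE aligned.
 */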
257
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700258/*
259 * This domain is a static identity mapping domain.
260 * 1. This domain creates a static 1:1 mapping to all usable memory.
261 * 2. It maps to each iommu if successful.
262 * 3. Each iommu maps to this domain if successful.
263 */
David Woodhouse19943b02009-08-04 16:19:20 +0100264static struct dmar_domain *si_domain;
265static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700266
Weidong Han3b5410e2008-12-08 09:17:15 +0800267/* devices under the same p2p bridge are owned in one domain */
Mike Daycdc7b832008-12-12 17:16:30 +0100268#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
Weidong Han3b5410e2008-12-08 09:17:15 +0800269
Weidong Han1ce28fe2008-12-08 16:35:39 +0800270/* domain represents a virtual machine; more than one device
271 * across iommus may be owned by one domain, e.g. a kvm guest.
272 */
273#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
274
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700275/* si_domain contains multiple devices */
276#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
277
Mark McLoughlin99126f72008-11-20 15:49:47 +0000278struct dmar_domain {
279 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700280 int nid; /* node id */
Weidong Han8c11e792008-12-08 15:29:22 +0800281 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000282
283 struct list_head devices; /* all devices' list */
284 struct iova_domain iovad; /* iova's that belong to this domain */
285
286 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000287 int gaw; /* max guest address width */
288
289 /* adjusted guest address width, 0 is level 2 30-bit */
290 int agaw;
291
Weidong Han3b5410e2008-12-08 09:17:15 +0800292 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800293
294 int iommu_coherency;/* indicates coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800295 int iommu_snooping; /* indicates snooping control feature */
Weidong Hanc7151a82008-12-08 22:51:37 +0800296 int iommu_count; /* reference count of iommu */
297 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800298 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000299};
300
Mark McLoughlina647dac2008-11-20 15:49:48 +0000301/* PCI domain-device relationship */
302struct device_domain_info {
303 struct list_head link; /* link to domain siblings */
304 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100305 int segment; /* PCI domain */
306 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000307 u8 devfn; /* PCI devfn number */
Stefan Assmann45e829e2009-12-03 06:49:24 -0500308 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800309 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000310 struct dmar_domain *domain; /* pointer to domain */
311};
312
mark gross5e0d2a62008-03-04 15:22:08 -0800313static void flush_unmaps_timeout(unsigned long data);
314
315DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
316
mark gross80b20dd2008-04-18 13:53:58 -0700317#define HIGH_WATER_MARK 250
318struct deferred_flush_tables {
319 int next;
320 struct iova *iova[HIGH_WATER_MARK];
321 struct dmar_domain *domain[HIGH_WATER_MARK];
322};
323
324static struct deferred_flush_tables *deferred_flush;
325
mark gross5e0d2a62008-03-04 15:22:08 -0800326/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800327static int g_num_of_iommus;
328
329static DEFINE_SPINLOCK(async_umap_flush_lock);
330static LIST_HEAD(unmaps_to_do);
331
332static int timer_on;
333static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800334
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700335static void domain_remove_dev_info(struct dmar_domain *domain);
336
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800337#ifdef CONFIG_DMAR_DEFAULT_ON
338int dmar_disabled = 0;
339#else
340int dmar_disabled = 1;
341#endif /*CONFIG_DMAR_DEFAULT_ON*/
342
David Woodhouse2d9e6672010-06-15 10:57:57 +0100343static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700344static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800345static int intel_iommu_strict;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700346
347#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
348static DEFINE_SPINLOCK(device_domain_lock);
349static LIST_HEAD(device_domain_list);
350
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100351static struct iommu_ops intel_iommu_ops;
352
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700353static int __init intel_iommu_setup(char *str)
354{
355 if (!str)
356 return -EINVAL;
357 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800358 if (!strncmp(str, "on", 2)) {
359 dmar_disabled = 0;
360 printk(KERN_INFO "Intel-IOMMU: enabled\n");
361 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700362 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800363 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700364 } else if (!strncmp(str, "igfx_off", 8)) {
365 dmar_map_gfx = 0;
366 printk(KERN_INFO
367 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700368 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800369 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700370 "Intel-IOMMU: Forcing DAC for PCI devices\n");
371 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800372 } else if (!strncmp(str, "strict", 6)) {
373 printk(KERN_INFO
374 "Intel-IOMMU: disable batched IOTLB flush\n");
375 intel_iommu_strict = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700376 }
377
378 str += strcspn(str, ",");
379 while (*str == ',')
380 str++;
381 }
382 return 0;
383}
384__setup("intel_iommu=", intel_iommu_setup);
385
386static struct kmem_cache *iommu_domain_cache;
387static struct kmem_cache *iommu_devinfo_cache;
388static struct kmem_cache *iommu_iova_cache;
389
Suresh Siddha4c923d42009-10-02 11:01:24 -0700390static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700391{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700392 struct page *page;
393 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700394
Suresh Siddha4c923d42009-10-02 11:01:24 -0700395 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
396 if (page)
397 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700398 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700399}
400
401static inline void free_pgtable_page(void *vaddr)
402{
403 free_page((unsigned long)vaddr);
404}
405
406static inline void *alloc_domain_mem(void)
407{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900408 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700409}
410
Kay, Allen M38717942008-09-09 18:37:29 +0300411static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700412{
413 kmem_cache_free(iommu_domain_cache, vaddr);
414}
415
416static inline void * alloc_devinfo_mem(void)
417{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900418 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700419}
420
421static inline void free_devinfo_mem(void *vaddr)
422{
423 kmem_cache_free(iommu_devinfo_cache, vaddr);
424}
425
426struct iova *alloc_iova_mem(void)
427{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900428 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700429}
430
431void free_iova_mem(struct iova *iova)
432{
433 kmem_cache_free(iommu_iova_cache, iova);
434}
435
Weidong Han1b573682008-12-08 15:34:06 +0800436
437static inline int width_to_agaw(int width);
438
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700439static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800440{
441 unsigned long sagaw;
442 int agaw = -1;
443
444 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700445 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800446 agaw >= 0; agaw--) {
447 if (test_bit(agaw, &sagaw))
448 break;
449 }
450
451 return agaw;
452}
453
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700454/*
455 * Calculate max SAGAW for each iommu.
456 */
457int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
458{
459 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
460}
461
462/*
463 * calculate agaw for each iommu.
464 * "SAGAW" may be different across iommus, use a default agaw, and
465 * get a supported less agaw for iommus that don't support the default agaw.
466 */
467int iommu_calculate_agaw(struct intel_iommu *iommu)
468{
469 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
470}
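/*
 * Worked example: width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH) ==
 * (48 - 30) / 9 == 2, i.e. a 4-level page table. If cap_sagaw() only
 * advertises bit 1 (39-bit, 3-level), __iommu_calculate_agaw() walks down
 * from 2 and settles on agaw 1. iommu_calculate_max_sagaw() starts the
 * same search from width_to_agaw(64) == 3 instead.
 */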
471
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700472/* This function only returns a single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800473static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
474{
475 int iommu_id;
476
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700477 /* si_domain and vm domain should not get here. */
Weidong Han1ce28fe2008-12-08 16:35:39 +0800478 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700479 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
Weidong Han1ce28fe2008-12-08 16:35:39 +0800480
Weidong Han8c11e792008-12-08 15:29:22 +0800481 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
482 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
483 return NULL;
484
485 return g_iommus[iommu_id];
486}
487
Weidong Han8e6040972008-12-08 15:49:06 +0800488static void domain_update_iommu_coherency(struct dmar_domain *domain)
489{
490 int i;
491
492 domain->iommu_coherency = 1;
493
Akinobu Mitaa45946a2010-03-11 14:04:08 -0800494 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
Weidong Han8e6040972008-12-08 15:49:06 +0800495 if (!ecap_coherent(g_iommus[i]->ecap)) {
496 domain->iommu_coherency = 0;
497 break;
498 }
Weidong Han8e6040972008-12-08 15:49:06 +0800499 }
500}
501
Sheng Yang58c610b2009-03-18 15:33:05 +0800502static void domain_update_iommu_snooping(struct dmar_domain *domain)
503{
504 int i;
505
506 domain->iommu_snooping = 1;
507
Akinobu Mitaa45946a2010-03-11 14:04:08 -0800508 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
Sheng Yang58c610b2009-03-18 15:33:05 +0800509 if (!ecap_sc_support(g_iommus[i]->ecap)) {
510 domain->iommu_snooping = 0;
511 break;
512 }
Sheng Yang58c610b2009-03-18 15:33:05 +0800513 }
514}
515
516/* Some capabilities may be different across iommus */
517static void domain_update_iommu_cap(struct dmar_domain *domain)
518{
519 domain_update_iommu_coherency(domain);
520 domain_update_iommu_snooping(domain);
521}
522
David Woodhouse276dbf992009-04-04 01:45:37 +0100523static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800524{
525 struct dmar_drhd_unit *drhd = NULL;
526 int i;
527
528 for_each_drhd_unit(drhd) {
529 if (drhd->ignored)
530 continue;
David Woodhouse276dbf992009-04-04 01:45:37 +0100531 if (segment != drhd->segment)
532 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800533
David Woodhouse924b6232009-04-04 00:39:25 +0100534 for (i = 0; i < drhd->devices_cnt; i++) {
Dirk Hohndel288e4872009-01-11 15:33:51 +0000535 if (drhd->devices[i] &&
536 drhd->devices[i]->bus->number == bus &&
Weidong Hanc7151a82008-12-08 22:51:37 +0800537 drhd->devices[i]->devfn == devfn)
538 return drhd->iommu;
David Woodhouse4958c5d2009-04-06 13:30:01 -0700539 if (drhd->devices[i] &&
540 drhd->devices[i]->subordinate &&
David Woodhouse924b6232009-04-04 00:39:25 +0100541 drhd->devices[i]->subordinate->number <= bus &&
542 drhd->devices[i]->subordinate->subordinate >= bus)
543 return drhd->iommu;
544 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800545
546 if (drhd->include_all)
547 return drhd->iommu;
548 }
549
550 return NULL;
551}
552
Weidong Han5331fe62008-12-08 23:00:00 +0800553static void domain_flush_cache(struct dmar_domain *domain,
554 void *addr, int size)
555{
556 if (!domain->iommu_coherency)
557 clflush_cache_range(addr, size);
558}
559
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700560/* Gets context entry for a given bus and devfn */
561static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
562 u8 bus, u8 devfn)
563{
564 struct root_entry *root;
565 struct context_entry *context;
566 unsigned long phy_addr;
567 unsigned long flags;
568
569 spin_lock_irqsave(&iommu->lock, flags);
570 root = &iommu->root_entry[bus];
571 context = get_context_addr_from_root(root);
572 if (!context) {
Suresh Siddha4c923d42009-10-02 11:01:24 -0700573 context = (struct context_entry *)
574 alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700575 if (!context) {
576 spin_unlock_irqrestore(&iommu->lock, flags);
577 return NULL;
578 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700579 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700580 phy_addr = virt_to_phys((void *)context);
581 set_root_value(root, phy_addr);
582 set_root_present(root);
583 __iommu_flush_cache(iommu, root, sizeof(*root));
584 }
585 spin_unlock_irqrestore(&iommu->lock, flags);
586 return &context[devfn];
587}
588
589static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
590{
591 struct root_entry *root;
592 struct context_entry *context;
593 int ret;
594 unsigned long flags;
595
596 spin_lock_irqsave(&iommu->lock, flags);
597 root = &iommu->root_entry[bus];
598 context = get_context_addr_from_root(root);
599 if (!context) {
600 ret = 0;
601 goto out;
602 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000603 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700604out:
605 spin_unlock_irqrestore(&iommu->lock, flags);
606 return ret;
607}
608
609static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
610{
611 struct root_entry *root;
612 struct context_entry *context;
613 unsigned long flags;
614
615 spin_lock_irqsave(&iommu->lock, flags);
616 root = &iommu->root_entry[bus];
617 context = get_context_addr_from_root(root);
618 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000619 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700620 __iommu_flush_cache(iommu, &context[devfn], \
621 sizeof(*context));
622 }
623 spin_unlock_irqrestore(&iommu->lock, flags);
624}
625
626static void free_context_table(struct intel_iommu *iommu)
627{
628 struct root_entry *root;
629 int i;
630 unsigned long flags;
631 struct context_entry *context;
632
633 spin_lock_irqsave(&iommu->lock, flags);
634 if (!iommu->root_entry) {
635 goto out;
636 }
637 for (i = 0; i < ROOT_ENTRY_NR; i++) {
638 root = &iommu->root_entry[i];
639 context = get_context_addr_from_root(root);
640 if (context)
641 free_pgtable_page(context);
642 }
643 free_pgtable_page(iommu->root_entry);
644 iommu->root_entry = NULL;
645out:
646 spin_unlock_irqrestore(&iommu->lock, flags);
647}
648
649/* page table handling */
650#define LEVEL_STRIDE (9)
651#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
652
653static inline int agaw_to_level(int agaw)
654{
655 return agaw + 2;
656}
657
658static inline int agaw_to_width(int agaw)
659{
660 return 30 + agaw * LEVEL_STRIDE;
661
662}
663
664static inline int width_to_agaw(int width)
665{
666 return (width - 30) / LEVEL_STRIDE;
667}
668
669static inline unsigned int level_to_offset_bits(int level)
670{
David Woodhouse6660c632009-06-27 22:41:00 +0100671 return (level - 1) * LEVEL_STRIDE;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700672}
673
David Woodhouse77dfa562009-06-27 16:40:08 +0100674static inline int pfn_level_offset(unsigned long pfn, int level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700675{
David Woodhouse6660c632009-06-27 22:41:00 +0100676 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700677}
678
David Woodhouse6660c632009-06-27 22:41:00 +0100679static inline unsigned long level_mask(int level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700680{
David Woodhouse6660c632009-06-27 22:41:00 +0100681 return -1UL << level_to_offset_bits(level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700682}
683
David Woodhouse6660c632009-06-27 22:41:00 +0100684static inline unsigned long level_size(int level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700685{
David Woodhouse6660c632009-06-27 22:41:00 +0100686 return 1UL << level_to_offset_bits(level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700687}
688
David Woodhouse6660c632009-06-27 22:41:00 +0100689static inline unsigned long align_to_level(unsigned long pfn, int level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700690{
David Woodhouse6660c632009-06-27 22:41:00 +0100691 return (pfn + level_size(level) - 1) & level_mask(level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700692}
693
David Woodhouseb026fd22009-06-28 10:37:25 +0100694static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
695 unsigned long pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700696{
David Woodhouseb026fd22009-06-28 10:37:25 +0100697 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700698 struct dma_pte *parent, *pte = NULL;
699 int level = agaw_to_level(domain->agaw);
700 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700701
702 BUG_ON(!domain->pgd);
David Woodhouseb026fd22009-06-28 10:37:25 +0100703 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700704 parent = domain->pgd;
705
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700706 while (level > 0) {
707 void *tmp_page;
708
David Woodhouseb026fd22009-06-28 10:37:25 +0100709 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700710 pte = &parent[offset];
711 if (level == 1)
712 break;
713
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000714 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100715 uint64_t pteval;
716
Suresh Siddha4c923d42009-10-02 11:01:24 -0700717 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700718
David Woodhouse206a73c12009-07-01 19:30:28 +0100719 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700720 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100721
David Woodhousec85994e2009-07-01 19:21:24 +0100722 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400723 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
David Woodhousec85994e2009-07-01 19:21:24 +0100724 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
725 /* Someone else set it while we were thinking; use theirs. */
726 free_pgtable_page(tmp_page);
727 } else {
728 dma_pte_addr(pte);
729 domain_flush_cache(domain, pte, sizeof(*pte));
730 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700731 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000732 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700733 level--;
734 }
735
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700736 return pte;
737}
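/*
 * The walk above starts at domain->pgd (level == agaw_to_level(agaw)) and
 * descends to level 1, allocating any missing directory page on the way;
 * the cmpxchg64() arbitrates concurrent walkers so the loser frees its
 * freshly allocated page and reuses the winner's. For agaw 2 this touches
 * at most four 512-entry table pages to resolve one pfn.
 */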
738
739/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100740static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
741 unsigned long pfn,
742 int level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700743{
744 struct dma_pte *parent, *pte = NULL;
745 int total = agaw_to_level(domain->agaw);
746 int offset;
747
748 parent = domain->pgd;
749 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100750 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700751 pte = &parent[offset];
752 if (level == total)
753 return pte;
754
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000755 if (!dma_pte_present(pte))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700756 break;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000757 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700758 total--;
759 }
760 return NULL;
761}
762
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700763/* clear last level pte, a tlb flush should follow */
David Woodhouse595badf2009-06-27 22:09:11 +0100764static void dma_pte_clear_range(struct dmar_domain *domain,
765 unsigned long start_pfn,
766 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700767{
David Woodhouse04b18e62009-06-27 19:15:01 +0100768 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100769 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700770
David Woodhouse04b18e62009-06-27 19:15:01 +0100771 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
David Woodhouse595badf2009-06-27 22:09:11 +0100772 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700773 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100774
David Woodhouse04b18e62009-06-27 19:15:01 +0100775 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700776 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100777 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
778 if (!pte) {
779 start_pfn = align_to_level(start_pfn + 1, 2);
780 continue;
781 }
David Woodhouse75e6bf92009-07-02 11:21:16 +0100782 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100783 dma_clear_pte(pte);
784 start_pfn++;
785 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100786 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
787
David Woodhouse310a5ab2009-06-28 18:52:20 +0100788 domain_flush_cache(domain, first_pte,
789 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700790
791 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700792}
793
794/* free page table pages. last level pte should already be cleared */
795static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100796 unsigned long start_pfn,
797 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700798{
David Woodhouse6660c632009-06-27 22:41:00 +0100799 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhousef3a0a522009-06-30 03:40:07 +0100800 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700801 int total = agaw_to_level(domain->agaw);
802 int level;
David Woodhouse6660c632009-06-27 22:41:00 +0100803 unsigned long tmp;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700804
David Woodhouse6660c632009-06-27 22:41:00 +0100805 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
806 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700807 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700808
David Woodhousef3a0a522009-06-30 03:40:07 +0100809 /* We don't need lock here; nobody else touches the iova range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700810 level = 2;
811 while (level <= total) {
David Woodhouse6660c632009-06-27 22:41:00 +0100812 tmp = align_to_level(start_pfn, level);
813
David Woodhousef3a0a522009-06-30 03:40:07 +0100814 /* If we can't even clear one PTE at this level, we're done */
David Woodhouse6660c632009-06-27 22:41:00 +0100815 if (tmp + level_size(level) - 1 > last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700816 return;
817
David Woodhouse59c36282009-09-19 07:36:28 -0700818 do {
David Woodhousef3a0a522009-06-30 03:40:07 +0100819 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
820 if (!pte) {
821 tmp = align_to_level(tmp + 1, level + 1);
822 continue;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700823 }
David Woodhouse75e6bf92009-07-02 11:21:16 +0100824 do {
David Woodhouse6a43e572009-07-02 12:02:34 +0100825 if (dma_pte_present(pte)) {
826 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
827 dma_clear_pte(pte);
828 }
David Woodhousef3a0a522009-06-30 03:40:07 +0100829 pte++;
830 tmp += level_size(level);
David Woodhouse75e6bf92009-07-02 11:21:16 +0100831 } while (!first_pte_in_page(pte) &&
832 tmp + level_size(level) - 1 <= last_pfn);
833
David Woodhousef3a0a522009-06-30 03:40:07 +0100834 domain_flush_cache(domain, first_pte,
835 (void *)pte - (void *)first_pte);
836
David Woodhouse59c36282009-09-19 07:36:28 -0700837 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700838 level++;
839 }
840 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100841 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700842 free_pgtable_page(domain->pgd);
843 domain->pgd = NULL;
844 }
845}
846
847/* iommu handling */
848static int iommu_alloc_root_entry(struct intel_iommu *iommu)
849{
850 struct root_entry *root;
851 unsigned long flags;
852
Suresh Siddha4c923d42009-10-02 11:01:24 -0700853 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700854 if (!root)
855 return -ENOMEM;
856
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700857 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700858
859 spin_lock_irqsave(&iommu->lock, flags);
860 iommu->root_entry = root;
861 spin_unlock_irqrestore(&iommu->lock, flags);
862
863 return 0;
864}
865
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700866static void iommu_set_root_entry(struct intel_iommu *iommu)
867{
868 void *addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100869 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700870 unsigned long flag;
871
872 addr = iommu->root_entry;
873
874 spin_lock_irqsave(&iommu->register_lock, flag);
875 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
876
David Woodhousec416daa2009-05-10 20:30:58 +0100877 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700878
879 /* Make sure hardware complete it */
880 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100881 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700882
883 spin_unlock_irqrestore(&iommu->register_lock, flag);
884}
885
886static void iommu_flush_write_buffer(struct intel_iommu *iommu)
887{
888 u32 val;
889 unsigned long flag;
890
David Woodhouse9af88142009-02-13 23:18:03 +0000891 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700892 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700893
894 spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +0100895 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700896
897 /* Make sure hardware complete it */
898 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100899 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700900
901 spin_unlock_irqrestore(&iommu->register_lock, flag);
902}
903
904/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +0100905static void __iommu_flush_context(struct intel_iommu *iommu,
906 u16 did, u16 source_id, u8 function_mask,
907 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700908{
909 u64 val = 0;
910 unsigned long flag;
911
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700912 switch (type) {
913 case DMA_CCMD_GLOBAL_INVL:
914 val = DMA_CCMD_GLOBAL_INVL;
915 break;
916 case DMA_CCMD_DOMAIN_INVL:
917 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
918 break;
919 case DMA_CCMD_DEVICE_INVL:
920 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
921 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
922 break;
923 default:
924 BUG();
925 }
926 val |= DMA_CCMD_ICC;
927
928 spin_lock_irqsave(&iommu->register_lock, flag);
929 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
930
931 /* Make sure hardware complete it */
932 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
933 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
934
935 spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700936}
937
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700938/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +0100939static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
940 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700941{
942 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
943 u64 val = 0, val_iva = 0;
944 unsigned long flag;
945
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700946 switch (type) {
947 case DMA_TLB_GLOBAL_FLUSH:
948 /* global flush doesn't need to set IVA_REG */
949 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
950 break;
951 case DMA_TLB_DSI_FLUSH:
952 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
953 break;
954 case DMA_TLB_PSI_FLUSH:
955 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
956 /* Note: always flush non-leaf currently */
957 val_iva = size_order | addr;
958 break;
959 default:
960 BUG();
961 }
962 /* Note: set drain read/write */
963#if 0
964 /*
965 * This is probably meant to be super secure. Looks like we can
966 * ignore it without any impact.
967 */
968 if (cap_read_drain(iommu->cap))
969 val |= DMA_TLB_READ_DRAIN;
970#endif
971 if (cap_write_drain(iommu->cap))
972 val |= DMA_TLB_WRITE_DRAIN;
973
974 spin_lock_irqsave(&iommu->register_lock, flag);
975 /* Note: Only uses first TLB reg currently */
976 if (val_iva)
977 dmar_writeq(iommu->reg + tlb_offset, val_iva);
978 dmar_writeq(iommu->reg + tlb_offset + 8, val);
979
980 /* Make sure hardware complete it */
981 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
982 dmar_readq, (!(val & DMA_TLB_IVT)), val);
983
984 spin_unlock_irqrestore(&iommu->register_lock, flag);
985
986 /* check IOTLB invalidation granularity */
987 if (DMA_TLB_IAIG(val) == 0)
988 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
989 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
990 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700991 (unsigned long long)DMA_TLB_IIRG(type),
992 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700993}
994
Yu Zhao93a23a72009-05-18 13:51:37 +0800995static struct device_domain_info *iommu_support_dev_iotlb(
996 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700997{
Yu Zhao93a23a72009-05-18 13:51:37 +0800998 int found = 0;
999 unsigned long flags;
1000 struct device_domain_info *info;
1001 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1002
1003 if (!ecap_dev_iotlb_support(iommu->ecap))
1004 return NULL;
1005
1006 if (!iommu->qi)
1007 return NULL;
1008
1009 spin_lock_irqsave(&device_domain_lock, flags);
1010 list_for_each_entry(info, &domain->devices, link)
1011 if (info->bus == bus && info->devfn == devfn) {
1012 found = 1;
1013 break;
1014 }
1015 spin_unlock_irqrestore(&device_domain_lock, flags);
1016
1017 if (!found || !info->dev)
1018 return NULL;
1019
1020 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1021 return NULL;
1022
1023 if (!dmar_find_matched_atsr_unit(info->dev))
1024 return NULL;
1025
1026 info->iommu = iommu;
1027
1028 return info;
1029}
1030
1031static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1032{
1033 if (!info)
1034 return;
1035
1036 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1037}
1038
1039static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1040{
1041 if (!info->dev || !pci_ats_enabled(info->dev))
1042 return;
1043
1044 pci_disable_ats(info->dev);
1045}
1046
1047static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1048 u64 addr, unsigned mask)
1049{
1050 u16 sid, qdep;
1051 unsigned long flags;
1052 struct device_domain_info *info;
1053
1054 spin_lock_irqsave(&device_domain_lock, flags);
1055 list_for_each_entry(info, &domain->devices, link) {
1056 if (!info->dev || !pci_ats_enabled(info->dev))
1057 continue;
1058
1059 sid = info->bus << 8 | info->devfn;
1060 qdep = pci_ats_queue_depth(info->dev);
1061 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1062 }
1063 spin_unlock_irqrestore(&device_domain_lock, flags);
1064}
1065
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001066static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
Nadav Amit82653632010-04-01 13:24:40 +03001067 unsigned long pfn, unsigned int pages, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001068{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001069 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001070 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001071
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001072 BUG_ON(pages == 0);
1073
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001074 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001075 * Fallback to domain selective flush if no PSI support or the size is
1076 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001077 * PSI requires page size to be 2 ^ x, and the base address is naturally
1078 * aligned to the size
1079 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001080 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1081 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001082 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001083 else
1084 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1085 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001086
1087 /*
Nadav Amit82653632010-04-01 13:24:40 +03001088 * In caching mode, changes of pages from non-present to present require
1089 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001090 */
Nadav Amit82653632010-04-01 13:24:40 +03001091 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001092 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001093}
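/*
 * Worked example (illustrative values): flushing 16 pages at pfn 0x1000
 * gives mask == ilog2(16) == 4 and addr == 0x1000000; because the range
 * is a naturally aligned power of two, a page-selective (PSI) flush is
 * used when the IOMMU supports it and cap_max_amask_val(iommu->cap) >= 4,
 * otherwise the code falls back to the domain-selective flush. A request
 * for, say, 9 pages is rounded up to 16.
 */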
1094
mark grossf8bab732008-02-08 04:18:38 -08001095static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1096{
1097 u32 pmen;
1098 unsigned long flags;
1099
1100 spin_lock_irqsave(&iommu->register_lock, flags);
1101 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1102 pmen &= ~DMA_PMEN_EPM;
1103 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1104
1105 /* wait for the protected region status bit to clear */
1106 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1107 readl, !(pmen & DMA_PMEN_PRS), pmen);
1108
1109 spin_unlock_irqrestore(&iommu->register_lock, flags);
1110}
1111
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001112static int iommu_enable_translation(struct intel_iommu *iommu)
1113{
1114 u32 sts;
1115 unsigned long flags;
1116
1117 spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001118 iommu->gcmd |= DMA_GCMD_TE;
1119 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001120
1121 /* Make sure hardware complete it */
1122 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001123 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001124
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001125 spin_unlock_irqrestore(&iommu->register_lock, flags);
1126 return 0;
1127}
1128
1129static int iommu_disable_translation(struct intel_iommu *iommu)
1130{
1131 u32 sts;
1132 unsigned long flag;
1133
1134 spin_lock_irqsave(&iommu->register_lock, flag);
1135 iommu->gcmd &= ~DMA_GCMD_TE;
1136 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1137
1138 /* Make sure hardware complete it */
1139 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001140 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001141
1142 spin_unlock_irqrestore(&iommu->register_lock, flag);
1143 return 0;
1144}
1145
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001146
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001147static int iommu_init_domains(struct intel_iommu *iommu)
1148{
1149 unsigned long ndomains;
1150 unsigned long nlongs;
1151
1152 ndomains = cap_ndoms(iommu->cap);
Yinghai Lu680a7522010-04-08 19:58:23 +01001153 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
1154 ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001155 nlongs = BITS_TO_LONGS(ndomains);
1156
Donald Dutile94a91b52009-08-20 16:51:34 -04001157 spin_lock_init(&iommu->lock);
1158
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001159 /* TBD: there might be 64K domains,
1160 * consider other allocation for future chip
1161 */
1162 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1163 if (!iommu->domain_ids) {
1164 printk(KERN_ERR "Allocating domain id array failed\n");
1165 return -ENOMEM;
1166 }
1167 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1168 GFP_KERNEL);
1169 if (!iommu->domains) {
1170 printk(KERN_ERR "Allocating domain array failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001171 return -ENOMEM;
1172 }
1173
1174 /*
1175 * if Caching mode is set, then invalid translations are tagged
1176 * with domainid 0. Hence we need to pre-allocate it.
1177 */
1178 if (cap_caching_mode(iommu->cap))
1179 set_bit(0, iommu->domain_ids);
1180 return 0;
1181}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001182
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001183
1184static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001185static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001186
1187void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001188{
1189 struct dmar_domain *domain;
1190 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001191 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001192
Donald Dutile94a91b52009-08-20 16:51:34 -04001193 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001194 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001195 domain = iommu->domains[i];
1196 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001197
Donald Dutile94a91b52009-08-20 16:51:34 -04001198 spin_lock_irqsave(&domain->iommu_lock, flags);
1199 if (--domain->iommu_count == 0) {
1200 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1201 vm_domain_exit(domain);
1202 else
1203 domain_exit(domain);
1204 }
1205 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001206 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001207 }
1208
1209 if (iommu->gcmd & DMA_GCMD_TE)
1210 iommu_disable_translation(iommu);
1211
1212 if (iommu->irq) {
1213 set_irq_data(iommu->irq, NULL);
1214 /* This will mask the irq */
1215 free_irq(iommu->irq, iommu);
1216 destroy_irq(iommu->irq);
1217 }
1218
1219 kfree(iommu->domains);
1220 kfree(iommu->domain_ids);
1221
Weidong Hand9630fe2008-12-08 11:06:32 +08001222 g_iommus[iommu->seq_id] = NULL;
1223
1224 /* if all iommus are freed, free g_iommus */
1225 for (i = 0; i < g_num_of_iommus; i++) {
1226 if (g_iommus[i])
1227 break;
1228 }
1229
1230 if (i == g_num_of_iommus)
1231 kfree(g_iommus);
1232
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001233 /* free context mapping */
1234 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001235}
1236
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001237static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001238{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001239 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001240
1241 domain = alloc_domain_mem();
1242 if (!domain)
1243 return NULL;
1244
Suresh Siddha4c923d42009-10-02 11:01:24 -07001245 domain->nid = -1;
Weidong Han8c11e792008-12-08 15:29:22 +08001246 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
Weidong Hand71a2f32008-12-07 21:13:41 +08001247 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001248
1249 return domain;
1250}
1251
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001252static int iommu_attach_domain(struct dmar_domain *domain,
1253 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001254{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001255 int num;
1256 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001257 unsigned long flags;
1258
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001259 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001260
1261 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001262
1263 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1264 if (num >= ndomains) {
1265 spin_unlock_irqrestore(&iommu->lock, flags);
1266 printk(KERN_ERR "IOMMU: no free domain ids\n");
1267 return -ENOMEM;
1268 }
1269
1270 domain->id = num;
1271 set_bit(num, iommu->domain_ids);
1272 set_bit(iommu->seq_id, &domain->iommu_bmp);
1273 iommu->domains[num] = domain;
1274 spin_unlock_irqrestore(&iommu->lock, flags);
1275
1276 return 0;
1277}
1278
1279static void iommu_detach_domain(struct dmar_domain *domain,
1280 struct intel_iommu *iommu)
1281{
1282 unsigned long flags;
1283 int num, ndomains;
1284 int found = 0;
1285
1286 spin_lock_irqsave(&iommu->lock, flags);
1287 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001288 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001289 if (iommu->domains[num] == domain) {
1290 found = 1;
1291 break;
1292 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001293 }
1294
1295 if (found) {
1296 clear_bit(num, iommu->domain_ids);
1297 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1298 iommu->domains[num] = NULL;
1299 }
Weidong Han8c11e792008-12-08 15:29:22 +08001300 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001301}
1302
1303static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001304static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305
1306static void dmar_init_reserved_ranges(void)
1307{
1308 struct pci_dev *pdev = NULL;
1309 struct iova *iova;
1310 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001311
David Millerf6611972008-02-06 01:36:23 -08001312 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001313
Mark Gross8a443df2008-03-04 14:59:31 -08001314 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1315 &reserved_rbtree_key);
1316
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317 /* IOAPIC ranges shouldn't be accessed by DMA */
1318 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1319 IOVA_PFN(IOAPIC_RANGE_END));
1320 if (!iova)
1321 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1322
1323 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1324 for_each_pci_dev(pdev) {
1325 struct resource *r;
1326
1327 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1328 r = &pdev->resource[i];
1329 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1330 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001331 iova = reserve_iova(&reserved_iova_list,
1332 IOVA_PFN(r->start),
1333 IOVA_PFN(r->end));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001334 if (!iova)
1335 printk(KERN_ERR "Reserve iova failed\n");
1336 }
1337 }
1338
1339}
1340
1341static void domain_reserve_special_ranges(struct dmar_domain *domain)
1342{
1343 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1344}
1345
1346static inline int guestwidth_to_adjustwidth(int gaw)
1347{
1348 int agaw;
1349 int r = (gaw - 12) % 9;
1350
1351 if (r == 0)
1352 agaw = gaw;
1353 else
1354 agaw = gaw + 9 - r;
1355 if (agaw > 64)
1356 agaw = 64;
1357 return agaw;
1358}
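/*
 * Worked examples: the adjusted width is gaw rounded up so that (gaw - 12)
 * is a multiple of 9, capped at 64. So guestwidth_to_adjustwidth(48) == 48
 * and guestwidth_to_adjustwidth(36) == 39 (r == 6, rounded up by 3).
 * domain_init() then maps the result to an agaw via width_to_agaw(),
 * e.g. 48 -> 2 (4-level) and 39 -> 1 (3-level).
 */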
1359
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

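/*
 * Install the context entry for (segment, bus, devfn) so that it points at
 * this domain's page tables (or selects pass-through translation).  For
 * virtual-machine and static-identity domains a per-IOMMU domain id is
 * allocated on demand.  The context and IOTLB caches are flushed as
 * required by the IOMMU's caching mode.
 */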
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If the hardware doesn't
	 * cache non-present entries we only need to flush the write-buffer.
	 * If it _does_ cache non-present entries, then it does so in the
	 * special domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

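/*
 * Set up context entries for the device itself and, when it sits behind a
 * PCIe-to-PCI bridge, for every bridge on the upstream path as well, since
 * DMA requests from the device may be tagged with any of those source-ids.
 */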
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

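/*
 * Core mapping routine: fill the domain's page tables for nr_pages starting
 * at iov_pfn, taking the physical pages either from a scatterlist or from a
 * contiguous range starting at phys_pfn.  Page-table pages are flushed to
 * memory whenever the IOMMU is not cache coherent.
 */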
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages || first_pte_in_page(pte)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

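/*
 * Unlink every device attached to the domain, clear the corresponding
 * context entries and free the per-device info structures.
 */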
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

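/*
 * Return the domain a device should use, allocating and initialising a new
 * one when the device has none yet.  Devices behind the same PCIe-to-PCI
 * bridge share a single domain.
 */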
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

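/*
 * Reserve the IOVA range [start, end] in the domain and install a 1:1
 * (identity) mapping for it.
 */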
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, and which therefore didn't
	   get set up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;

}

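/*
 * Create the static identity (si) domain, attach it to every active IOMMU
 * and, unless hardware pass-through is in use, install 1:1 mappings for all
 * usable physical memory.
 */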
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

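/*
 * Decide whether a device should be placed in the static 1:1 identity
 * domain, based on the identity-mapping policy (all/gfx/azalia), the
 * device's position in the PCI topology and, at run time, its DMA mask.
 */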
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup)
		return pdev->dma_mask > DMA_BIT_MASK(32);

	return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
			       hw ? "hardware" : "software", pci_name(pdev));

			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
		}
	}

	return 0;
}

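/*
 * Main DMAR initialisation: allocate per-IOMMU state, set up root and
 * context tables, pick register-based or queued invalidation, create the
 * identity mappings (pass-through, RMRR, ISA/floppy) and finally enable
 * translation on every IOMMU.
 */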
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_DMAR_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass-through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx and isa, and possibly fall back
	 * to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32 bit DMA device is removed from si_domain and
			 * falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64 bit DMA device detached from a VM is put back into
		 * si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

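/*
 * Map a physically contiguous buffer for DMA: allocate an IOVA range below
 * the device's DMA mask, install the page-table entries and flush the
 * IOTLB (or just the write buffer, depending on the caching mode).
 */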
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page; we should map the
	 * whole page.  Note: if two parts of one page are separately mapped,
	 * we might have two guest_addr mappings to the same host paddr, but
	 * this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

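/*
 * Deferred unmap machinery: freed IOVAs are batched per IOMMU in
 * deferred_flush[] and released from a timer (or once the high-water mark
 * is reached), so that a single IOTLB flush can cover many unmaps.
 */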
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002715static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2716 size_t size, enum dma_data_direction dir,
2717 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718{
2719 struct pci_dev *pdev = to_pci_dev(dev);
2720 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002721 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002722 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002723 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724
David Woodhouse73676832009-07-04 14:08:36 +01002725 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002726 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002727
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002728 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729 BUG_ON(!domain);
2730
Weidong Han8c11e792008-12-08 15:29:22 +08002731 iommu = domain_get_iommu(domain);
2732
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002733 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002734 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2735 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002736 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002737
David Woodhoused794dc92009-06-28 00:27:49 +01002738 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2739 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002740
David Woodhoused794dc92009-06-28 00:27:49 +01002741 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2742 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002743
2744 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002745 dma_pte_clear_range(domain, start_pfn, last_pfn);
2746
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002747 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002748 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2749
mark gross5e0d2a62008-03-04 15:22:08 -08002750 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002751 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002752 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002753 /* free iova */
2754 __free_iova(&domain->iovad, iova);
2755 } else {
2756 add_unmap(domain, iova);
 2757 /*
 2758 * queue up the release of the unmap so that iotlb flushes can be
 2759 * batched, saving the roughly 1/6th of a CPU they would otherwise cost
 2760 */
mark gross5e0d2a62008-03-04 15:22:08 -08002761 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002762}
2763
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002764static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2765 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002766{
2767 void *vaddr;
2768 int order;
2769
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002770 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002771 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002772
2773 if (!iommu_no_mapping(hwdev))
2774 flags &= ~(GFP_DMA | GFP_DMA32);
2775 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2776 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2777 flags |= GFP_DMA;
2778 else
2779 flags |= GFP_DMA32;
2780 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002781
2782 vaddr = (void *)__get_free_pages(flags, order);
2783 if (!vaddr)
2784 return NULL;
2785 memset(vaddr, 0, size);
2786
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002787 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2788 DMA_BIDIRECTIONAL,
2789 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002790 if (*dma_handle)
2791 return vaddr;
2792 free_pages((unsigned long)vaddr, order);
2793 return NULL;
2794}
2795
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002796static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2797 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002798{
2799 int order;
2800
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002801 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002802 order = get_order(size);
2803
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002804 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002805 free_pages((unsigned long)vaddr, order);
2806}
2807
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002808static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2809 int nelems, enum dma_data_direction dir,
2810 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002811{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002812 struct pci_dev *pdev = to_pci_dev(hwdev);
2813 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002814 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002815 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002816 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002817
David Woodhouse73676832009-07-04 14:08:36 +01002818 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002819 return;
2820
2821 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002822 BUG_ON(!domain);
2823
2824 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002825
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002826 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002827 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2828 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002829 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002830
David Woodhoused794dc92009-06-28 00:27:49 +01002831 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2832 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002833
2834 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002835 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002836
David Woodhoused794dc92009-06-28 00:27:49 +01002837 /* free page tables */
2838 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2839
David Woodhouseacea0012009-07-14 01:55:11 +01002840 if (intel_iommu_strict) {
2841 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002842 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01002843 /* free iova */
2844 __free_iova(&domain->iovad, iova);
2845 } else {
2846 add_unmap(domain, iova);
 2847 /*
 2848 * queue up the release of the unmap so that iotlb flushes can be
 2849 * batched, saving the roughly 1/6th of a CPU they would otherwise cost
 2850 */
2851 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002852}
2853
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002854static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002855 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002856{
2857 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002858 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002859
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002860 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002861 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002862 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002863 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002864 }
2865 return nelems;
2866}
2867
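/*
 * intel_map_sg(): allocate one IOVA range large enough for the whole
 * scatterlist, map every entry in a single pass via domain_sg_mapping(),
 * and flush the IOTLB only in caching mode; otherwise a write-buffer flush
 * is enough for a not-present to present transition.
 */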
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002868static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2869 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002871 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002872 struct pci_dev *pdev = to_pci_dev(hwdev);
2873 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002874 size_t size = 0;
2875 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002876 struct iova *iova = NULL;
2877 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002878 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01002879 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08002880 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002881
2882 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01002883 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002884 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002885
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002886 domain = get_valid_domain_for_dev(pdev);
2887 if (!domain)
2888 return 0;
2889
Weidong Han8c11e792008-12-08 15:29:22 +08002890 iommu = domain_get_iommu(domain);
2891
David Woodhouseb536d242009-06-28 14:49:31 +01002892 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01002893 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002894
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002895 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2896 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002897 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002898 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002899 return 0;
2900 }
2901
2902 /*
 2903 * Check if DMAR supports zero-length reads on write-only
 2904 * mappings.
2905 */
2906 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002907 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002908 prot |= DMA_PTE_READ;
2909 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2910 prot |= DMA_PTE_WRITE;
2911
David Woodhouseb536d242009-06-28 14:49:31 +01002912 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002913
Fenghua Yuf5329592009-08-04 15:09:37 -07002914 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01002915 if (unlikely(ret)) {
2916 /* clear the page */
2917 dma_pte_clear_range(domain, start_vpfn,
2918 start_vpfn + size - 1);
2919 /* free page tables */
2920 dma_pte_free_pagetable(domain, start_vpfn,
2921 start_vpfn + size - 1);
2922 /* free iova */
2923 __free_iova(&domain->iovad, iova);
2924 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002925 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002926
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002927 /* it's a non-present to present mapping. Only flush if caching mode */
2928 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002929 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002930 else
Weidong Han8c11e792008-12-08 15:29:22 +08002931 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002932
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002933 return nelems;
2934}
2935
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002936static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2937{
2938 return !dma_addr;
2939}
2940
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002941struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002942 .alloc_coherent = intel_alloc_coherent,
2943 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944 .map_sg = intel_map_sg,
2945 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002946 .map_page = intel_map_page,
2947 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002948 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002949};
2950
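/*
 * Illustrative sketch (not part of this driver): once dma_ops points at
 * intel_dma_ops above, a PCI driver using the generic streaming DMA API
 * ends up in intel_map_page()/intel_unmap_page().  The device, buffer and
 * length below are assumptions made purely for the example.
 */
#if 0
static int example_streaming_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Backed by intel_map_page(): allocates an IOVA and maps the page(s). */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA from 'handle' ... */

	/* Backed by intel_unmap_page(): may defer the IOTLB flush (see above). */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif
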
2951static inline int iommu_domain_cache_init(void)
2952{
2953 int ret = 0;
2954
2955 iommu_domain_cache = kmem_cache_create("iommu_domain",
2956 sizeof(struct dmar_domain),
2957 0,
2958 SLAB_HWCACHE_ALIGN,
2960 NULL);
2961 if (!iommu_domain_cache) {
2962 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2963 ret = -ENOMEM;
2964 }
2965
2966 return ret;
2967}
2968
2969static inline int iommu_devinfo_cache_init(void)
2970{
2971 int ret = 0;
2972
2973 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2974 sizeof(struct device_domain_info),
2975 0,
2976 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002977 NULL);
2978 if (!iommu_devinfo_cache) {
2979 printk(KERN_ERR "Couldn't create devinfo cache\n");
2980 ret = -ENOMEM;
2981 }
2982
2983 return ret;
2984}
2985
2986static inline int iommu_iova_cache_init(void)
2987{
2988 int ret = 0;
2989
2990 iommu_iova_cache = kmem_cache_create("iommu_iova",
2991 sizeof(struct iova),
2992 0,
2993 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002994 NULL);
2995 if (!iommu_iova_cache) {
2996 printk(KERN_ERR "Couldn't create iova cache\n");
2997 ret = -ENOMEM;
2998 }
2999
3000 return ret;
3001}
3002
3003static int __init iommu_init_mempool(void)
3004{
3005 int ret;
3006 ret = iommu_iova_cache_init();
3007 if (ret)
3008 return ret;
3009
3010 ret = iommu_domain_cache_init();
3011 if (ret)
3012 goto domain_error;
3013
3014 ret = iommu_devinfo_cache_init();
3015 if (!ret)
3016 return ret;
3017
3018 kmem_cache_destroy(iommu_domain_cache);
3019domain_error:
3020 kmem_cache_destroy(iommu_iova_cache);
3021
3022 return -ENOMEM;
3023}
3024
3025static void __init iommu_exit_mempool(void)
3026{
3027 kmem_cache_destroy(iommu_devinfo_cache);
3028 kmem_cache_destroy(iommu_domain_cache);
3029 kmem_cache_destroy(iommu_iova_cache);
3030
3031}
3032
Dan Williams556ab452010-07-23 15:47:56 -07003033static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3034{
3035 struct dmar_drhd_unit *drhd;
3036 u32 vtbar;
3037 int rc;
3038
3039 /* We know that this device on this chipset has its own IOMMU.
3040 * If we find it under a different IOMMU, then the BIOS is lying
3041 * to us. Hope that the IOMMU for this device is actually
3042 * disabled, and it needs no translation...
3043 */
3044 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3045 if (rc) {
3046 /* "can't" happen */
3047 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3048 return;
3049 }
3050 vtbar &= 0xffff0000;
3051
 3052 /* we know that this iommu should be at offset 0xa000 from vtbar */
3053 drhd = dmar_find_matched_drhd_unit(pdev);
3054 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3055 TAINT_FIRMWARE_WORKAROUND,
3056 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3057 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3058}
3059DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3060
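/*
 * Mark DMAR units that cover no PCI devices as ignored.  If gfx mapping is
 * disabled (dmar_map_gfx == 0), also ignore units that cover only graphics
 * devices and tag those devices with DUMMY_DEVICE_DOMAIN_INFO so they
 * bypass translation entirely.
 */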
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061static void __init init_no_remapping_devices(void)
3062{
3063 struct dmar_drhd_unit *drhd;
3064
3065 for_each_drhd_unit(drhd) {
3066 if (!drhd->include_all) {
3067 int i;
3068 for (i = 0; i < drhd->devices_cnt; i++)
3069 if (drhd->devices[i] != NULL)
3070 break;
3071 /* ignore DMAR unit if no pci devices exist */
3072 if (i == drhd->devices_cnt)
3073 drhd->ignored = 1;
3074 }
3075 }
3076
3077 if (dmar_map_gfx)
3078 return;
3079
3080 for_each_drhd_unit(drhd) {
3081 int i;
3082 if (drhd->ignored || drhd->include_all)
3083 continue;
3084
3085 for (i = 0; i < drhd->devices_cnt; i++)
3086 if (drhd->devices[i] &&
3087 !IS_GFX_DEVICE(drhd->devices[i]))
3088 break;
3089
3090 if (i < drhd->devices_cnt)
3091 continue;
3092
3093 /* bypass IOMMU if it is just for gfx devices */
3094 drhd->ignored = 1;
3095 for (i = 0; i < drhd->devices_cnt; i++) {
3096 if (!drhd->devices[i])
3097 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07003098 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003099 }
3100 }
3101}
3102
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003103#ifdef CONFIG_SUSPEND
3104static int init_iommu_hw(void)
3105{
3106 struct dmar_drhd_unit *drhd;
3107 struct intel_iommu *iommu = NULL;
3108
3109 for_each_active_iommu(iommu, drhd)
3110 if (iommu->qi)
3111 dmar_reenable_qi(iommu);
3112
3113 for_each_active_iommu(iommu, drhd) {
3114 iommu_flush_write_buffer(iommu);
3115
3116 iommu_set_root_entry(iommu);
3117
3118 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003119 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003120 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003121 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003122 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003123 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003124 }
3125
3126 return 0;
3127}
3128
3129static void iommu_flush_all(void)
3130{
3131 struct dmar_drhd_unit *drhd;
3132 struct intel_iommu *iommu;
3133
3134 for_each_active_iommu(iommu, drhd) {
3135 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003136 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003137 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003138 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003139 }
3140}
3141
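/*
 * Suspend path: flush all context/IOTLB caches, disable translation and
 * save the fault-event control/data/address registers of every active
 * IOMMU.  iommu_resume() re-initialises the hardware via init_iommu_hw()
 * and then restores those registers.
 */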
3142static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3143{
3144 struct dmar_drhd_unit *drhd;
3145 struct intel_iommu *iommu = NULL;
3146 unsigned long flag;
3147
3148 for_each_active_iommu(iommu, drhd) {
3149 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3150 GFP_ATOMIC);
3151 if (!iommu->iommu_state)
3152 goto nomem;
3153 }
3154
3155 iommu_flush_all();
3156
3157 for_each_active_iommu(iommu, drhd) {
3158 iommu_disable_translation(iommu);
3159
3160 spin_lock_irqsave(&iommu->register_lock, flag);
3161
3162 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3163 readl(iommu->reg + DMAR_FECTL_REG);
3164 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3165 readl(iommu->reg + DMAR_FEDATA_REG);
3166 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3167 readl(iommu->reg + DMAR_FEADDR_REG);
3168 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3169 readl(iommu->reg + DMAR_FEUADDR_REG);
3170
3171 spin_unlock_irqrestore(&iommu->register_lock, flag);
3172 }
3173 return 0;
3174
3175nomem:
3176 for_each_active_iommu(iommu, drhd)
3177 kfree(iommu->iommu_state);
3178
3179 return -ENOMEM;
3180}
3181
3182static int iommu_resume(struct sys_device *dev)
3183{
3184 struct dmar_drhd_unit *drhd;
3185 struct intel_iommu *iommu = NULL;
3186 unsigned long flag;
3187
3188 if (init_iommu_hw()) {
 3189 WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3190 return -EIO;
3191 }
3192
3193 for_each_active_iommu(iommu, drhd) {
3194
3195 spin_lock_irqsave(&iommu->register_lock, flag);
3196
3197 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3198 iommu->reg + DMAR_FECTL_REG);
3199 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3200 iommu->reg + DMAR_FEDATA_REG);
3201 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3202 iommu->reg + DMAR_FEADDR_REG);
3203 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3204 iommu->reg + DMAR_FEUADDR_REG);
3205
3206 spin_unlock_irqrestore(&iommu->register_lock, flag);
3207 }
3208
3209 for_each_active_iommu(iommu, drhd)
3210 kfree(iommu->iommu_state);
3211
3212 return 0;
3213}
3214
3215static struct sysdev_class iommu_sysclass = {
3216 .name = "iommu",
3217 .resume = iommu_resume,
3218 .suspend = iommu_suspend,
3219};
3220
3221static struct sys_device device_iommu = {
3222 .cls = &iommu_sysclass,
3223};
3224
3225static int __init init_iommu_sysfs(void)
3226{
3227 int error;
3228
3229 error = sysdev_class_register(&iommu_sysclass);
3230 if (error)
3231 return error;
3232
3233 error = sysdev_register(&device_iommu);
3234 if (error)
3235 sysdev_class_unregister(&iommu_sysclass);
3236
3237 return error;
3238}
3239
3240#else
3241static int __init init_iommu_sysfs(void)
3242{
3243 return 0;
3244}
 3245#endif /* CONFIG_SUSPEND */
3246
Fenghua Yu99dcade2009-11-11 07:23:06 -08003247/*
 3248 * Here we only respond to a device being unbound from its driver.
 3249 *
 3250 * A newly added device is not attached to its DMAR domain here yet; that
 3251 * happens when the device is first mapped to an iova.
3252 */
3253static int device_notifier(struct notifier_block *nb,
3254 unsigned long action, void *data)
3255{
3256 struct device *dev = data;
3257 struct pci_dev *pdev = to_pci_dev(dev);
3258 struct dmar_domain *domain;
3259
David Woodhouse44cd6132009-12-02 10:18:30 +00003260 if (iommu_no_mapping(dev))
3261 return 0;
3262
Fenghua Yu99dcade2009-11-11 07:23:06 -08003263 domain = find_domain(pdev);
3264 if (!domain)
3265 return 0;
3266
3267 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
3268 domain_remove_one_dev_info(domain, pdev);
3269
3270 return 0;
3271}
3272
3273static struct notifier_block device_nb = {
3274 .notifier_call = device_notifier,
3275};
3276
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003277int __init intel_iommu_init(void)
3278{
3279 int ret = 0;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003280 int force_on = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003281
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003282 /* VT-d is required for a TXT/tboot launch, so enforce that */
3283 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003285 if (dmar_table_init()) {
3286 if (force_on)
3287 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003288 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003289 }
3290
3291 if (dmar_dev_scope_init()) {
3292 if (force_on)
3293 panic("tboot: Failed to initialize DMAR device scope\n");
3294 return -ENODEV;
3295 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003296
Suresh Siddha2ae21012008-07-10 11:16:43 -07003297 /*
3298 * Check the need for DMA-remapping initialization now.
 3299 * The initialization above is also used by interrupt remapping.
3300 */
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003301 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003302 return -ENODEV;
3303
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003304 iommu_init_mempool();
3305 dmar_init_reserved_ranges();
3306
3307 init_no_remapping_devices();
3308
3309 ret = init_dmars();
3310 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003311 if (force_on)
3312 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313 printk(KERN_ERR "IOMMU: dmar init failed\n");
3314 put_iova_domain(&reserved_iova_list);
3315 iommu_exit_mempool();
3316 return ret;
3317 }
3318 printk(KERN_INFO
3319 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3320
mark gross5e0d2a62008-03-04 15:22:08 -08003321 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003322#ifdef CONFIG_SWIOTLB
3323 swiotlb = 0;
3324#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003325 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003326
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003327 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003328
3329 register_iommu(&intel_iommu_ops);
3330
Fenghua Yu99dcade2009-11-11 07:23:06 -08003331 bus_register_notifier(&pci_bus_type, &device_nb);
3332
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003333 return 0;
3334}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003335
Han, Weidong3199aa62009-02-26 17:31:12 +08003336static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3337 struct pci_dev *pdev)
3338{
3339 struct pci_dev *tmp, *parent;
3340
3341 if (!iommu || !pdev)
3342 return;
3343
3344 /* dependent device detach */
3345 tmp = pci_find_upstream_pcie_bridge(pdev);
3346 /* Secondary interface's bus number and devfn 0 */
3347 if (tmp) {
3348 parent = pdev->bus->self;
3349 while (parent != tmp) {
3350 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003351 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003352 parent = parent->bus->self;
3353 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003354 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003355 iommu_detach_dev(iommu,
3356 tmp->subordinate->number, 0);
3357 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003358 iommu_detach_dev(iommu, tmp->bus->number,
3359 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003360 }
3361}
3362
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003363static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003364 struct pci_dev *pdev)
3365{
3366 struct device_domain_info *info;
3367 struct intel_iommu *iommu;
3368 unsigned long flags;
3369 int found = 0;
3370 struct list_head *entry, *tmp;
3371
David Woodhouse276dbf992009-04-04 01:45:37 +01003372 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3373 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003374 if (!iommu)
3375 return;
3376
3377 spin_lock_irqsave(&device_domain_lock, flags);
3378 list_for_each_safe(entry, tmp, &domain->devices) {
3379 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf992009-04-04 01:45:37 +01003380 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003381 if (info->bus == pdev->bus->number &&
3382 info->devfn == pdev->devfn) {
3383 list_del(&info->link);
3384 list_del(&info->global);
3385 if (info->dev)
3386 info->dev->dev.archdata.iommu = NULL;
3387 spin_unlock_irqrestore(&device_domain_lock, flags);
3388
Yu Zhao93a23a72009-05-18 13:51:37 +08003389 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003390 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003391 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003392 free_devinfo_mem(info);
3393
3394 spin_lock_irqsave(&device_domain_lock, flags);
3395
3396 if (found)
3397 break;
3398 else
3399 continue;
3400 }
3401
 3402 /* if there is no other device under the same iommu
 3403 * owned by this domain, clear this iommu in iommu_bmp,
 3404 * update iommu count and coherency
3405 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003406 if (iommu == device_to_iommu(info->segment, info->bus,
3407 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003408 found = 1;
3409 }
3410
3411 if (found == 0) {
3412 unsigned long tmp_flags;
3413 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3414 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3415 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003416 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003417 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3418 }
3419
3420 spin_unlock_irqrestore(&device_domain_lock, flags);
3421}
3422
3423static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3424{
3425 struct device_domain_info *info;
3426 struct intel_iommu *iommu;
3427 unsigned long flags1, flags2;
3428
3429 spin_lock_irqsave(&device_domain_lock, flags1);
3430 while (!list_empty(&domain->devices)) {
3431 info = list_entry(domain->devices.next,
3432 struct device_domain_info, link);
3433 list_del(&info->link);
3434 list_del(&info->global);
3435 if (info->dev)
3436 info->dev->dev.archdata.iommu = NULL;
3437
3438 spin_unlock_irqrestore(&device_domain_lock, flags1);
3439
Yu Zhao93a23a72009-05-18 13:51:37 +08003440 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003441 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003442 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003443 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003444
3445 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003446 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003447 */
3448 spin_lock_irqsave(&domain->iommu_lock, flags2);
3449 if (test_and_clear_bit(iommu->seq_id,
3450 &domain->iommu_bmp)) {
3451 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003452 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003453 }
3454 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3455
3456 free_devinfo_mem(info);
3457 spin_lock_irqsave(&device_domain_lock, flags1);
3458 }
3459 spin_unlock_irqrestore(&device_domain_lock, flags1);
3460}
3461
Weidong Han5e98c4b2008-12-08 23:03:27 +08003462/* domain id for a virtual machine; it won't be set in a context entry */
3463static unsigned long vm_domid;
3464
3465static struct dmar_domain *iommu_alloc_vm_domain(void)
3466{
3467 struct dmar_domain *domain;
3468
3469 domain = alloc_domain_mem();
3470 if (!domain)
3471 return NULL;
3472
3473 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003474 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003475 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3476 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3477
3478 return domain;
3479}
3480
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003481static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003482{
3483 int adjust_width;
3484
3485 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003486 spin_lock_init(&domain->iommu_lock);
3487
3488 domain_reserve_special_ranges(domain);
3489
3490 /* calculate AGAW */
3491 domain->gaw = guest_width;
3492 adjust_width = guestwidth_to_adjustwidth(guest_width);
3493 domain->agaw = width_to_agaw(adjust_width);
3494
3495 INIT_LIST_HEAD(&domain->devices);
3496
3497 domain->iommu_count = 0;
3498 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003499 domain->iommu_snooping = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003500 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003501 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003502
3503 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003504 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003505 if (!domain->pgd)
3506 return -ENOMEM;
3507 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3508 return 0;
3509}
3510
3511static void iommu_free_vm_domain(struct dmar_domain *domain)
3512{
3513 unsigned long flags;
3514 struct dmar_drhd_unit *drhd;
3515 struct intel_iommu *iommu;
3516 unsigned long i;
3517 unsigned long ndomains;
3518
3519 for_each_drhd_unit(drhd) {
3520 if (drhd->ignored)
3521 continue;
3522 iommu = drhd->iommu;
3523
3524 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003525 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003526 if (iommu->domains[i] == domain) {
3527 spin_lock_irqsave(&iommu->lock, flags);
3528 clear_bit(i, iommu->domain_ids);
3529 iommu->domains[i] = NULL;
3530 spin_unlock_irqrestore(&iommu->lock, flags);
3531 break;
3532 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003533 }
3534 }
3535}
3536
3537static void vm_domain_exit(struct dmar_domain *domain)
3538{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003539 /* Domain 0 is reserved, so don't process it */
3540 if (!domain)
3541 return;
3542
3543 vm_domain_remove_all_dev_info(domain);
3544 /* destroy iovas */
3545 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003546
3547 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003548 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003549
3550 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003551 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003552
3553 iommu_free_vm_domain(domain);
3554 free_domain_mem(domain);
3555}
3556
Joerg Roedel5d450802008-12-03 14:52:32 +01003557static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003558{
Joerg Roedel5d450802008-12-03 14:52:32 +01003559 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003560
Joerg Roedel5d450802008-12-03 14:52:32 +01003561 dmar_domain = iommu_alloc_vm_domain();
3562 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003563 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003564 "intel_iommu_domain_init: dmar_domain == NULL\n");
3565 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003566 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003567 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003568 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003569 "intel_iommu_domain_init() failed\n");
3570 vm_domain_exit(dmar_domain);
3571 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003572 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003573 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003574
Joerg Roedel5d450802008-12-03 14:52:32 +01003575 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003576}
Kay, Allen M38717942008-09-09 18:37:29 +03003577
Joerg Roedel5d450802008-12-03 14:52:32 +01003578static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003579{
Joerg Roedel5d450802008-12-03 14:52:32 +01003580 struct dmar_domain *dmar_domain = domain->priv;
3581
3582 domain->priv = NULL;
3583 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003584}
Kay, Allen M38717942008-09-09 18:37:29 +03003585
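/*
 * Attaching a device: if the device is already mapped (e.g. by the DMA API
 * path), tear down its old domain first; then check that this IOMMU can
 * address everything the domain has mapped so far, trim the domain's
 * page-table depth to the IOMMU's AGAW, and finally set up the context
 * entry via domain_add_dev_info().
 */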
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003586static int intel_iommu_attach_device(struct iommu_domain *domain,
3587 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003588{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003589 struct dmar_domain *dmar_domain = domain->priv;
3590 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003591 struct intel_iommu *iommu;
3592 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003593
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003594 /* normally pdev is not mapped */
3595 if (unlikely(domain_context_mapped(pdev))) {
3596 struct dmar_domain *old_domain;
3597
3598 old_domain = find_domain(pdev);
3599 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003600 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3601 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3602 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003603 else
3604 domain_remove_dev_info(old_domain);
3605 }
3606 }
3607
David Woodhouse276dbf992009-04-04 01:45:37 +01003608 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3609 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003610 if (!iommu)
3611 return -ENODEV;
3612
3613 /* check if this iommu agaw is sufficient for max mapped address */
3614 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003615 if (addr_width > cap_mgaw(iommu->cap))
3616 addr_width = cap_mgaw(iommu->cap);
3617
3618 if (dmar_domain->max_addr > (1LL << addr_width)) {
3619 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003620 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003621 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003622 return -EFAULT;
3623 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003624 dmar_domain->gaw = addr_width;
3625
3626 /*
3627 * Knock out extra levels of page tables if necessary
3628 */
3629 while (iommu->agaw < dmar_domain->agaw) {
3630 struct dma_pte *pte;
3631
3632 pte = dmar_domain->pgd;
3633 if (dma_pte_present(pte)) {
3634 free_pgtable_page(dmar_domain->pgd);
Sheng Yang25cbff12010-06-12 19:21:42 +08003635 dmar_domain->pgd = (struct dma_pte *)
3636 phys_to_virt(dma_pte_addr(pte));
Tom Lyona99c47a2010-05-17 08:20:45 +01003637 }
3638 dmar_domain->agaw--;
3639 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003640
David Woodhouse5fe60f42009-08-09 10:53:41 +01003641 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003642}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003643
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003644static void intel_iommu_detach_device(struct iommu_domain *domain,
3645 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003646{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003647 struct dmar_domain *dmar_domain = domain->priv;
3648 struct pci_dev *pdev = to_pci_dev(dev);
3649
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003650 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003651}
Kay, Allen M38717942008-09-09 18:37:29 +03003652
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003653static int intel_iommu_map(struct iommu_domain *domain,
3654 unsigned long iova, phys_addr_t hpa,
3655 int gfp_order, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003656{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003657 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003658 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003659 int prot = 0;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003660 size_t size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003661 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003662
Joerg Roedeldde57a22008-12-03 15:04:09 +01003663 if (iommu_prot & IOMMU_READ)
3664 prot |= DMA_PTE_READ;
3665 if (iommu_prot & IOMMU_WRITE)
3666 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08003667 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3668 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003669
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003670 size = PAGE_SIZE << gfp_order;
David Woodhouse163cc522009-06-28 00:51:17 +01003671 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003672 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003673 u64 end;
3674
3675 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01003676 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003677 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01003678 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003679 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01003680 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003681 return -EFAULT;
3682 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003683 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003684 }
David Woodhousead051222009-06-28 14:22:28 +01003685 /* Round up size to next multiple of PAGE_SIZE, if it and
3686 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003687 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003688 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3689 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003690 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003691}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003692
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003693static int intel_iommu_unmap(struct iommu_domain *domain,
3694 unsigned long iova, int gfp_order)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003695{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003696 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003697 size_t size = PAGE_SIZE << gfp_order;
Sheng Yang4b99d352009-07-08 11:52:52 +01003698
David Woodhouse163cc522009-06-28 00:51:17 +01003699 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3700 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003701
David Woodhouse163cc522009-06-28 00:51:17 +01003702 if (dmar_domain->max_addr == iova + size)
3703 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003704
3705 return gfp_order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003706}
Kay, Allen M38717942008-09-09 18:37:29 +03003707
Joerg Roedeld14d6572008-12-03 15:06:57 +01003708static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3709 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003710{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003711 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003712 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003713 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003714
David Woodhouseb026fd22009-06-28 10:37:25 +01003715 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003716 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003717 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003718
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003719 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003720}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003721
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003722static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3723 unsigned long cap)
3724{
3725 struct dmar_domain *dmar_domain = domain->priv;
3726
3727 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3728 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04003729 if (cap == IOMMU_CAP_INTR_REMAP)
3730 return intr_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003731
3732 return 0;
3733}
3734
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003735static struct iommu_ops intel_iommu_ops = {
3736 .domain_init = intel_iommu_domain_init,
3737 .domain_destroy = intel_iommu_domain_destroy,
3738 .attach_dev = intel_iommu_attach_device,
3739 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003740 .map = intel_iommu_map,
3741 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003742 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003743 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003744};
David Woodhouse9af88142009-02-13 23:18:03 +00003745
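/*
 * Illustrative sketch (not part of this driver): how a caller such as KVM
 * device assignment would exercise intel_iommu_ops above through the
 * generic IOMMU API of this kernel generation, where iommu_map()/
 * iommu_unmap() take a page order.  The device pointer and addresses are
 * assumptions made purely for the example.
 */
#if 0
static int example_iommu_api_usage(struct device *assigned_dev,
				   unsigned long iova, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, assigned_dev);
	if (ret)
		goto out_free;

	/* Map a single page read/write: -> intel_iommu_map() */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device DMA through 'iova' ... */

	iommu_unmap(domain, iova, 0);		/* -> intel_iommu_unmap() */
out_detach:
	iommu_detach_device(domain, assigned_dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}
#endif
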
3746static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3747{
3748 /*
3749 * Mobile 4 Series Chipset neglects to set RWBF capability,
3750 * but needs it:
3751 */
3752 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3753 rwbf_quirk = 1;
David Woodhouse2d9e6672010-06-15 10:57:57 +01003754
3755 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
3756 if (dev->revision == 0x07) {
3757 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
3758 dmar_map_gfx = 0;
3759 }
David Woodhouse9af88142009-02-13 23:18:03 +00003760}
3761
3762DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07003763
3764/* On Tylersburg chipsets, some BIOSes have been known to enable the
3765 ISOCH DMAR unit for the Azalia sound device, but not give it any
3766 TLB entries, which causes it to deadlock. Check for that. We do
3767 this in a function called from init_dmars(), instead of in a PCI
3768 quirk, because we don't want to print the obnoxious "BIOS broken"
3769 message if VT-d is actually disabled.
3770*/
3771static void __init check_tylersburg_isoch(void)
3772{
3773 struct pci_dev *pdev;
3774 uint32_t vtisochctrl;
3775
3776 /* If there's no Azalia in the system anyway, forget it. */
3777 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3778 if (!pdev)
3779 return;
3780 pci_dev_put(pdev);
3781
3782 /* System Management Registers. Might be hidden, in which case
3783 we can't do the sanity check. But that's OK, because the
3784 known-broken BIOSes _don't_ actually hide it, so far. */
3785 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3786 if (!pdev)
3787 return;
3788
3789 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3790 pci_dev_put(pdev);
3791 return;
3792 }
3793
3794 pci_dev_put(pdev);
3795
3796 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
3797 if (vtisochctrl & 1)
3798 return;
3799
3800 /* Drop all bits other than the number of TLB entries */
3801 vtisochctrl &= 0x1c;
3802
3803 /* If we have the recommended number of TLB entries (16), fine. */
3804 if (vtisochctrl == 0x10)
3805 return;
3806
3807 /* Zero TLB entries? You get to ride the short bus to school. */
3808 if (!vtisochctrl) {
3809 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
3810 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3811 dmi_get_system_info(DMI_BIOS_VENDOR),
3812 dmi_get_system_info(DMI_BIOS_VERSION),
3813 dmi_get_system_info(DMI_PRODUCT_VERSION));
3814 iommu_identity_mapping |= IDENTMAP_AZALIA;
3815 return;
3816 }
3817
3818 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
3819 vtisochctrl);
3820}