/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
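/* Note: with 4KiB kernel pages PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and the
   two conversions below are the identity; the shifts only matter when the
   MM page size is larger than the VT-d page size (e.g. 16KiB or 64KiB
   pages, where one MM pfn spans several VT-d pfns). */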
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
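/* Each root entry is 16 bytes, so a 4KiB root table holds ROOT_ENTRY_NR ==
   256 entries -- one per possible PCI bus number. */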
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

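/* Illustrative note: the helpers below fill in an entry field by field.
 * For example, a present multi-level entry for domain id "did", address
 * width "aw" and page-table root "pgd_phys" ends up as:
 *	lo = 1 | (CONTEXT_TT_MULTI_LEVEL << 2) | (pgd_phys & VTD_PAGE_MASK);
 *	hi = (aw & 7) | ((did & 0xffff) << 8);
 */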
static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
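
/* Illustrative note: a present, writable, snooped last-level PTE mapping
 * physical page "pfn" is composed by the helpers below as
 *	pte->val = DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP |
 *		   ((u64)pfn << VTD_PAGE_SHIFT);
 */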

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

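/* Deferred-unmap bookkeeping: rather than flushing the IOTLB on every
 * unmap, freed IOVAs are queued in the tables above and released in
 * batches, either from unmap_timer or once a table fills up to
 * HIGH_WATER_MARK (see flush_unmaps_timeout() and the "strict" option
 * below, which disables this batching). */
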
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

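/* Illustrative mapping implemented by agaw_to_level()/agaw_to_width()
 * below (width = 30 + 9 * agaw, level = agaw + 2):
 *	agaw 0: 30-bit address width, 2-level page table
 *	agaw 1: 39-bit address width, 3-level page table
 *	agaw 2: 48-bit address width, 4-level page table
 *	agaw 3: 57-bit address width, 5-level page table
 */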
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

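/* Illustrative note: with a 4-level table (agaw 2, 48-bit width), the walk
 * in pfn_to_dma_pte() below consumes 9 bits of the pfn per level via
 * pfn_level_offset():
 *	level 4 index = (pfn >> 27) & 511
 *	level 3 index = (pfn >> 18) & 511
 *	level 2 index = (pfn >> 9) & 511
 *	level 1 index = pfn & 511
 */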
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w; the last level
			 * page table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_pfn_level_pte(domain, pfn, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		dma_pte_clear_one(domain, start_pfn);
		start_pfn++;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably done to be super-secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

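/* Illustrative note: iommu_flush_iotlb_psi() below encodes the size of a
 * page-selective invalidation as the log2 of the page count rounded up to
 * a power of two, e.g. pages = 3 gives mask = 2, i.e. an aligned 4-page
 * invalidation, which is the granularity the hardware requires. */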
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page count to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

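/* Illustrative examples for the adjustment below: guest widths already on
 * a page-table-level boundary (12 + 9*n bits) are kept as-is (48 -> 48);
 * others are rounded up to the next boundary (36 -> 39, 40 -> 48), capped
 * at 64. */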
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommus which have less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001547 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001548 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001549 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001550 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001551 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001552 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001553
1554 spin_lock_irqsave(&domain->iommu_lock, flags);
1555 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1556 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001557 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001558 }
1559 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560 return 0;
1561}
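
/*
 * A conventional PCI device behind a PCIe-to-PCI(-X) bridge is seen by the
 * IOMMU under the requester ID of the bridge (or of the bridge's secondary
 * bus with devfn 0), not under its own.  domain_context_mapping() therefore
 * programs a context entry not just for the device itself but for every
 * bridge on the path up to the PCIe root, so that DMA arriving under any
 * of those aliases still hits this domain's page tables.
 */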
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
                       int translation)
{
        int ret;
        struct pci_dev *tmp, *parent;

        ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
                                         pdev->bus->number, pdev->devfn,
                                         translation);
        if (ret)
                return ret;

        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return 0;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = domain_context_mapping_one(domain,
                                                 pci_domain_nr(parent->bus),
                                                 parent->bus->number,
                                                 parent->devfn, translation);
                if (ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
                                        translation);
        else /* this is a legacy PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->bus),
                                        tmp->bus->number,
                                        tmp->devfn,
                                        translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
        int ret;
        struct pci_dev *tmp, *parent;
        struct intel_iommu *iommu;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
        if (!ret)
                return ret;
        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return ret;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = device_context_mapped(iommu, parent->bus->number,
                                            parent->devfn);
                if (!ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie)
                return device_context_mapped(iommu, tmp->subordinate->number,
                                             0);
        else
                return device_context_mapped(iommu, tmp->bus->number,
                                             tmp->devfn);
}

static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                              unsigned long phys_pfn, unsigned long nr_pages,
                              int prot)
{
        struct dma_pte *pte;
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        while (nr_pages--) {
                pte = pfn_to_dma_pte(domain, iov_pfn);
                if (!pte)
                        return -ENOMEM;
                /* We don't need a lock here; nobody else
                 * touches this iova range
                 */
                BUG_ON(dma_pte_addr(pte));
                dma_set_pte_pfn(pte, phys_pfn);
                dma_set_pte_prot(pte, prot);
                if (prot & DMA_PTE_SNP)
                        dma_set_pte_snp(pte);
                domain_flush_cache(domain, pte, sizeof(*pte));
                iov_pfn++;
                phys_pfn++;
        }
        return 0;
}
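
/*
 * Illustration (hypothetical values, not taken from this file): to map a
 * contiguous 16KiB buffer at physical pfn 0x12340 to IOVA pfn 0x800,
 * read/write, a caller would do something like
 *
 *      ret = domain_pfn_mapping(domain, 0x800, 0x12340, 4,
 *                               DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * i.e. both addresses are expressed in VTD_PAGE_SIZE units, and the PTEs
 * for the four pages are written and flushed one at a time.
 */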

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info;
        unsigned long flags;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                                  struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                if (info->dev)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);

                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device-domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
        struct device_domain_info *info;

        /* No lock here; assumes no domain exits in the normal case */
        info = pdev->dev.archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

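/*
 * get_domain_for_dev() resolves a device to a domain in three steps:
 * reuse the domain already cached in dev.archdata.iommu if there is one;
 * otherwise, if the device sits behind a PCIe-to-PCI bridge, reuse the
 * domain that bridge already owns (all devices behind such a bridge must
 * share one domain, since they share one requester ID); only then is a
 * fresh domain allocated, attached to the right IOMMU and initialized.
 */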
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
        struct dmar_domain *domain, *found = NULL;
        struct intel_iommu *iommu;
        struct dmar_drhd_unit *drhd;
        struct device_domain_info *info, *tmp;
        struct pci_dev *dev_tmp;
        unsigned long flags;
        int bus = 0, devfn = 0;
        int segment;
        int ret;

        domain = find_domain(pdev);
        if (domain)
                return domain;

        segment = pci_domain_nr(pdev->bus);

        dev_tmp = pci_find_upstream_pcie_bridge(pdev);
        if (dev_tmp) {
                if (dev_tmp->is_pcie) {
                        bus = dev_tmp->subordinate->number;
                        devfn = 0;
                } else {
                        bus = dev_tmp->bus->number;
                        devfn = dev_tmp->devfn;
                }
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(info, &device_domain_list, global) {
                        if (info->segment == segment &&
                            info->bus == bus && info->devfn == devfn) {
                                found = info->domain;
                                break;
                        }
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);
                /* pcie-pci bridge already has a domain, use it */
                if (found) {
                        domain = found;
                        goto found_domain;
                }
        }

        domain = alloc_domain();
        if (!domain)
                goto error;

        /* Allocate new domain for the device */
        drhd = dmar_find_matched_drhd_unit(pdev);
        if (!drhd) {
                printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
                       pci_name(pdev));
                return NULL;
        }
        iommu = drhd->iommu;

        ret = iommu_attach_domain(domain, iommu);
        if (ret) {
                domain_exit(domain);
                goto error;
        }

        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                goto error;
        }

        /* register pcie-to-pci device */
        if (dev_tmp) {
                info = alloc_devinfo_mem();
                if (!info) {
                        domain_exit(domain);
                        goto error;
                }
                info->segment = segment;
                info->bus = bus;
                info->devfn = devfn;
                info->dev = NULL;
                info->domain = domain;
                /* This domain is shared by devices under p2p bridge */
                domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

                /* pcie-to-pci bridge already has a domain, use it */
                found = NULL;
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(tmp, &device_domain_list, global) {
                        if (tmp->segment == segment &&
                            tmp->bus == bus && tmp->devfn == devfn) {
                                found = tmp->domain;
                                break;
                        }
                }
                if (found) {
                        free_devinfo_mem(info);
                        domain_exit(domain);
                        domain = found;
                } else {
                        list_add(&info->link, &domain->devices);
                        list_add(&info->global, &device_domain_list);
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);
        }

found_domain:
        info = alloc_devinfo_mem();
        if (!info)
                goto error;
        info->segment = segment;
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->dev = pdev;
        info->domain = domain;
        spin_lock_irqsave(&device_domain_lock, flags);
        /* somebody else was faster and set it already */
        found = find_domain(pdev);
        if (found != NULL) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                if (found != domain) {
                        domain_exit(domain);
                        domain = found;
                }
                free_devinfo_mem(info);
                return domain;
        }
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
        return domain;
error:
        /* recheck it here, maybe somebody else set it */
        return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
{
        unsigned long size;
        unsigned long long base;

        /* The address might not be aligned */
        base = start & PAGE_MASK;
        size = end - base;
        size = PAGE_ALIGN(size);
        if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
                          IOVA_PFN(base + size) - 1)) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
                return -ENOMEM;
        }

        pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
                 size, base, domain->id);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
         */
        dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
                            (base + size - 1) >> VTD_PAGE_SHIFT);

        return domain_pfn_mapping(domain, base >> VTD_PAGE_SHIFT,
                                  base >> VTD_PAGE_SHIFT,
                                  size >> VTD_PAGE_SHIFT,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
}
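
/*
 * An identity map is simply iova == phys: the range is first reserved in
 * the domain's iova allocator, so dma_map_*() can never hand out addresses
 * that collide with it, and then domain_pfn_mapping() installs 1:1 PTEs --
 * note that the iova pfn and phys pfn arguments above are both
 * base >> VTD_PAGE_SHIFT.
 */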

static int iommu_prepare_identity_map(struct pci_dev *pdev,
                                      unsigned long long start,
                                      unsigned long long end)
{
        struct dmar_domain *domain;
        int ret;

        printk(KERN_INFO
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               pci_name(pdev), start, end);

        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;

        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;

        /* context entry init */
        ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
        if (ret)
                goto error;

        return 0;

 error:
        domain_exit(domain);
        return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
                                         struct pci_dev *pdev)
{
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return 0;
        return iommu_prepare_identity_map(pdev, rmrr->base_address,
                                          rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
        struct pci_dev *pdev;
        int ret;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (!pdev)
                return;

        printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

        if (ret)
                printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
                       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
        return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass-through. */
static int __init init_context_pass_through(void)
{
        struct pci_dev *pdev = NULL;
        struct dmar_domain *domain;
        int ret;

        for_each_pci_dev(pdev) {
                domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_PASS_THROUGH);
                if (ret)
                        return ret;
        }
        return 0;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
                                    unsigned long end_pfn, void *datax)
{
        int *ret = datax;

        *ret = iommu_domain_identity_map(si_domain,
                                         (uint64_t)start_pfn << PAGE_SHIFT,
                                         (uint64_t)end_pfn << PAGE_SHIFT);
        return *ret;

}

static int si_domain_init(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int nid, ret = 0;

        si_domain = alloc_domain();
        if (!si_domain)
                return -EFAULT;

        pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

        for_each_active_iommu(iommu, drhd) {
                ret = iommu_attach_domain(si_domain, iommu);
                if (ret) {
                        domain_exit(si_domain);
                        return -EFAULT;
                }
        }

        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }

        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

        for_each_online_node(nid) {
                work_with_active_regions(nid, si_domain_work_fn, &ret);
                if (ret)
                        return ret;
        }

        return 0;
}
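
/*
 * si_domain is a single "static identity" domain shared by every device
 * that is allowed 1:1 DMA: it is attached to every active IOMMU and then
 * populated, node by node via work_with_active_regions(), with identity
 * mappings for all usable physical memory, so DMA addresses equal physical
 * addresses for any device placed in it.
 */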

static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
        struct device_domain_info *info;

        if (likely(!iommu_identity_mapping))
                return 0;

        list_for_each_entry(info, &si_domain->devices, link)
                if (info->dev == pdev)
                        return 1;
        return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
                               struct pci_dev *pdev)
{
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();
        if (!info)
                return -ENOMEM;

        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->dev = pdev;
        info->domain = domain;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
        struct pci_dev *pdev = NULL;
        int ret;

        ret = si_domain_init();
        if (ret)
                return -EFAULT;

        for_each_pci_dev(pdev) {
                printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
                       pci_name(pdev));

                ret = domain_context_mapping(si_domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                if (ret)
                        return ret;
                ret = domain_add_dev_info(si_domain, pdev);
                if (ret)
                        return ret;
        }

        return 0;
}

int __init init_dmars(void)
{
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;
        int pass_through = 1;

        /*
         * If pass through cannot be enabled, the iommu tries to use
         * identity mapping instead.
         */
        if (iommu_pass_through)
                iommu_identity_mapping = 1;

        /*
         * for each drhd
         *      allocate root
         *      initialize and program root entry to not present
         * endfor
         */
        for_each_drhd_unit(drhd) {
                g_num_of_iommus++;
                /*
                 * lock not needed as this is only incremented in the single
                 * threaded kernel __init code path; all other accesses are
                 * read only
                 */
        }

        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                           GFP_KERNEL);
        if (!g_iommus) {
                printk(KERN_ERR "Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }

        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
                kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;
                g_iommus[iommu->seq_id] = iommu;

                ret = iommu_init_domains(iommu);
                if (ret)
                        goto error;

                /*
                 * TBD:
                 * we could share the same root & context tables
                 * among all IOMMUs.  Need to split it later.
                 */
                ret = iommu_alloc_root_entry(iommu);
                if (ret) {
                        printk(KERN_ERR "IOMMU: allocate root entry failed\n");
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
                        pass_through = 0;
        }
        if (iommu_pass_through)
                if (!pass_through) {
                        printk(KERN_INFO
                               "Pass Through is not supported by hardware.\n");
                        iommu_pass_through = 0;
                }

        /*
         * Start from a sane iommu hardware state.
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized by us
                 * (for example, while enabling interrupt-remapping) then
                 * we got the things already rolling from a sane state.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(-1, iommu);
                /*
                 * Disable queued invalidation if supported and already enabled
                 * before OS handover.
                 */
                dmar_disable_qi(iommu);
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued Invalidate not enabled, use Register Based
                         * Invalidate
                         */
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
                               "invalidation\n",
                               (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
                               "invalidation\n",
                               (unsigned long long)drhd->reg_base_addr);
                }
        }

        /*
         * If pass through is set and enabled, context entries of all pci
         * devices are initialized with the pass-through translation type.
         */
        if (iommu_pass_through) {
                ret = init_context_pass_through();
                if (ret) {
                        printk(KERN_ERR "IOMMU: Pass through init failed.\n");
                        iommu_pass_through = 0;
                }
        }

        /*
         * If pass through is not set or not enabled, set up context entries
         * for identity mappings for rmrr, gfx, and isa, and possibly fall
         * back to static identity mapping if iommu_identity_mapping is set.
         */
        if (!iommu_pass_through) {
                if (iommu_identity_mapping)
                        iommu_prepare_static_identity_mapping();
                /*
                 * For each rmrr
                 *   for each dev attached to rmrr
                 *   do
                 *     locate drhd for dev, alloc domain for dev
                 *     allocate free domain
                 *     allocate page table entries for rmrr
                 *     if context not allocated for bus
                 *           allocate and init context
                 *           set present in root table for this bus
                 *     init context with domain, translation etc
                 *    endfor
                 * endfor
                 */
                printk(KERN_INFO "IOMMU: Setting RMRR:\n");
                for_each_rmrr_units(rmrr) {
                        for (i = 0; i < rmrr->devices_cnt; i++) {
                                pdev = rmrr->devices[i];
                                /*
                                 * some BIOSes list nonexistent devices in
                                 * the DMAR table.
                                 */
                                if (!pdev)
                                        continue;
                                ret = iommu_prepare_rmrr_dev(rmrr, pdev);
                                if (ret)
                                        printk(KERN_ERR
                                               "IOMMU: mapping reserved region failed\n");
                        }
                }

                iommu_prepare_isa();
        }

        /*
         * for each drhd
         *   enable fault log
         *   global invalidate context cache
         *   global invalidate iotlb
         *   enable translation
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;

                iommu_flush_write_buffer(iommu);

                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto error;

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
                iommu_disable_protect_mem_regions(iommu);

                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;
        }

        return 0;
error:
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
        kfree(g_iommus);
        return ret;
}

static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        host_addr += size + PAGE_SIZE - 1;

        return host_addr >> VTD_PAGE_SHIFT;
}
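
/*
 * Worked example (illustrative numbers): with 4KiB pages, a buffer of
 * size 0x2000 starting at host_addr 0x1234 has an in-page offset of
 * 0x234, so 0x234 + 0x2000 + 0xfff = 0x3233, and 0x3233 >> 12 = 3 --
 * three VT-d pages are needed even though the length alone is only two.
 */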

struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
        struct iova *piova;

        /* Make sure it's in range */
        end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
        if (!size || (IOVA_START_ADDR + size > end))
                return NULL;

        piova = alloc_iova(&domain->iovad,
                           size >> PAGE_SHIFT, IOVA_PFN(end), 1);
        return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
                   size_t size, u64 dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;

        if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
                iova = iommu_alloc_iova(domain, size, dma_mask);
        else {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_BIT_MASK(32) and if that fails then try allocating
                 * from higher range
                 */
                iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
                if (!iova)
                        iova = iommu_alloc_iova(domain, size, dma_mask);
        }

        if (!iova) {
                printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
                return NULL;
        }

        return iova;
}
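
/*
 * Rationale (general PCI background, not stated in this file): preferring
 * the 32-bit range keeps DMA addresses below 4GiB whenever possible;
 * addresses above that require 64-bit (dual address cycle) transactions,
 * which some devices and bridges handle less efficiently.  Setting
 * dmar_forcedac skips the preference and allocates straight from the
 * device's full mask.
 */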

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(pdev,
                                    DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
                printk(KERN_ERR
                       "Allocating domain for %s failed", pci_name(pdev));
                return NULL;
        }

        /* make sure context mapping is ok */
        if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                if (ret) {
                        printk(KERN_ERR
                               "Domain context map for %s failed",
                               pci_name(pdev));
                        return NULL;
                }
        }

        return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct pci_dev *pdev)
{
        int found;

        if (!iommu_identity_mapping)
                return iommu_dummy(pdev);

        found = identity_mapping(pdev);
        if (found) {
                if (pdev->dma_mask > DMA_BIT_MASK(32))
                        return 1;
                else {
                        /*
                         * A 32-bit DMA device is removed from si_domain
                         * and falls back to non-identity mapping.
                         */
                        domain_remove_one_dev_info(si_domain, pdev);
                        printk(KERN_INFO "32bit %s uses non-identity mapping\n",
                               pci_name(pdev));
                        return 0;
                }
        } else {
                /*
                 * In case of a 64-bit DMA device detached from a VM, the
                 * device is put back into si_domain for identity mapping.
                 */
                if (pdev->dma_mask > DMA_BIT_MASK(32)) {
                        int ret;
                        ret = domain_add_dev_info(si_domain, pdev);
                        if (!ret) {
                                printk(KERN_INFO "64bit %s uses identity mapping\n",
                                       pci_name(pdev));
                                return 1;
                        }
                }
        }

        return iommu_dummy(pdev);
}
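
/*
 * So iommu_no_mapping() returns nonzero exactly when the DMA API should
 * hand back the physical address untranslated: the device was marked with
 * the dummy domain info, or identity mapping is in force and the device
 * can address all of memory (> 32-bit DMA mask).  32-bit-only devices are
 * evicted from si_domain on first use, since the identity map may place
 * buffers above the 4GiB limit they can reach.
 */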

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
        int ret;
        struct intel_iommu *iommu;

        BUG_ON(dir == DMA_NONE);

        if (iommu_no_mapping(pdev))
                return paddr;

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);

        iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
        if (!iova)
                goto error;

        /*
         * Check if DMAR supports zero-length reads on write only
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
                        !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
        /*
         * paddr to (paddr + size) might span a partial page; we should map
         * the whole page.  Note: if two parts of one page are mapped
         * separately, we might end up with two guest addresses mapping to
         * the same host paddr, but this is not a big problem
         */
        ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
                                 paddr >> VTD_PAGE_SHIFT, size, prot);
        if (ret)
                goto error;

        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
        else
                iommu_flush_write_buffer(iommu);

        start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
        start_paddr += paddr & ~PAGE_MASK;
        return start_paddr;

error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        return __intel_map_single(dev, page_to_phys(page) + offset, size,
                                  dir, to_pci_dev(dev)->dma_mask);
}
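
/*
 * intel_map_page() is where a driver's dma_map_single()/dma_map_page()
 * call lands once intel_dma_ops (see the bottom of this hunk) is installed
 * as the platform's struct dma_map_ops: the returned dma_addr_t is the
 * iova chosen above plus the buffer's offset within its first page.
 */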

static void flush_unmaps(void)
{
        int i, j;

        timer_on = 0;

        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                if (!iommu)
                        continue;

                if (!deferred_flush[i].next)
                        continue;

                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
                for (j = 0; j < deferred_flush[i].next; j++) {
                        unsigned long mask;
                        struct iova *iova = deferred_flush[i].iova[j];

                        mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
                        mask = ilog2(mask >> VTD_PAGE_SHIFT);
                        iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
                                              iova->pfn_lo << PAGE_SHIFT, mask);
                        __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
                }
                deferred_flush[i].next = 0;
        }

        list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        flush_unmaps();
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
        unsigned long flags;
        int next, iommu_id;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();

        iommu = domain_get_iommu(dom);
        iommu_id = iommu->seq_id;

        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
        deferred_flush[iommu_id].next++;

        if (!timer_on) {
                mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
                timer_on = 1;
        }
        list_size++;
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
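
/*
 * Deferred-flush batching: instead of issuing one IOTLB invalidation per
 * unmap, add_unmap() parks the iova on a per-IOMMU list and arms a 10ms
 * timer; flush_unmaps() then retires the whole batch with a single global
 * IOTLB flush per IOMMU (plus per-entry device-IOTLB flushes) and only
 * frees the iovas afterwards, so a stale translation can never point at
 * reallocated address space.  Reaching HIGH_WATER_MARK pending entries
 * forces an immediate flush.
 */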

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(pdev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
        if (!iova)
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        pr_debug("Device %s unmapping: pfn %lx-%lx\n",
                 pci_name(pdev), start_pfn, last_pfn);

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                      last_pfn - start_pfn + 1);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
                add_unmap(domain, iova);
                /*
                 * queue up the release of the unmap to save the roughly
                 * 1/6 of the cpu time that would otherwise be spent on
                 * the iotlb flush operation...
                 */
        }
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
                               int dir)
{
        intel_unmap_page(dev, dev_addr, size, dir, NULL);
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        flags &= ~(GFP_DMA | GFP_DMA32);

        vaddr = (void *)__get_free_pages(flags, order);
        if (!vaddr)
                return NULL;
        memset(vaddr, 0, size);

        *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
                                         DMA_BIDIRECTIONAL,
                                         hwdev->coherent_dma_mask);
        if (*dma_handle)
                return vaddr;
        free_pages((unsigned long)vaddr, order);
        return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                                dma_addr_t dma_handle)
{
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(pdev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
        if (!iova)
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                              (last_pfn - start_pfn + 1));

        /* free iova */
        __free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
                                     struct scatterlist *sglist, int nelems, int dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
                sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                sg->dma_length = sg->length;
        }
        return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
                        enum dma_data_direction dir, struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        size_t size = 0;
        int prot = 0;
        size_t offset_pfn = 0;
        struct iova *iova = NULL;
        int ret;
        struct scatterlist *sg;
        unsigned long start_vpfn;
        struct intel_iommu *iommu;

        BUG_ON(dir == DMA_NONE);
        if (iommu_no_mapping(pdev))
                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);

        for_each_sg(sglist, sg, nelems, i)
                size += aligned_nrpages(sg->offset, sg->length);

        iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
                                  pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
        }

        /*
         * Check if DMAR supports zero-length reads on write only
         * mappings..
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
                        !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;

        start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
        offset_pfn = 0;
        for_each_sg(sglist, sg, nelems, i) {
                int nr_pages = aligned_nrpages(sg->offset, sg->length);
                ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
                                         page_to_dma_pfn(sg_page(sg)),
                                         nr_pages, prot);
                if (ret) {
                        /* clear the page */
                        dma_pte_clear_range(domain, start_vpfn,
                                            start_vpfn + offset_pfn);
                        /* free page tables */
                        dma_pte_free_pagetable(domain, start_vpfn,
                                               start_vpfn + offset_pfn);
                        /* free iova */
                        __free_iova(&domain->iovad, iova);
                        return 0;
                }
                sg->dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
                                   << VTD_PAGE_SHIFT) + sg->offset;
                sg->dma_length = sg->length;
                offset_pfn += nr_pages;
        }

        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
        else
                iommu_flush_write_buffer(iommu);

        return nelems;
}
2804
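/*
 * A minimal sketch of the page arithmetic intel_map_sg() depends on,
 * assuming the aligned_nrpages() helper defined earlier in this file:
 * a buffer's sub-page offset can push it across an extra page boundary,
 * so the per-sg page counts are summed before one IOVA range big enough
 * for the whole scatterlist is allocated.  Figures are illustrative:
 *
 *	offset = 0xe00, length = 0x400
 *	aligned end = PAGE_ALIGN(0xe00 + 0x400) = 0x2000
 *	nr_pages    = 0x2000 >> VTD_PAGE_SHIFT = 2
 *
 * i.e. a 1KiB buffer still consumes two IOVA pages when it straddles a
 * page boundary, which is why size is accumulated with aligned_nrpages()
 * rather than with sg->length >> VTD_PAGE_SHIFT.
 */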
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002805static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2806{
2807 return !dma_addr;
2808}
2809
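/*
 * Illustrative only (buffer names hypothetical): drivers reach the
 * zero-address check above through the generic dma_mapping_error()
 * helper rather than calling it directly.  Using !dma_addr as the error
 * cookie works because the IOVA allocator hands out addresses downwards
 * from the top of the device's DMA mask, so a successful mapping never
 * lands at zero:
 *
 *	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, addr))
 *		return -ENOMEM;
 */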
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002810struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002811 .alloc_coherent = intel_alloc_coherent,
2812 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002813 .map_sg = intel_map_sg,
2814 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002815 .map_page = intel_map_page,
2816 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002817 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002818};
2819
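/*
 * Illustrative, hypothetical driver snippet: nothing selects these ops
 * explicitly.  Once dma_ops points at intel_dma_ops (see
 * intel_iommu_init() below), an ordinary DMA API call such as this one
 * is dispatched into intel_map_sg()/intel_unmap_sg() behind the
 * driver's back:
 *
 *	int n = dma_map_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 *	if (n == 0)
 *		goto err;
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 */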
2820static inline int iommu_domain_cache_init(void)
2821{
2822 int ret = 0;
2823
2824 iommu_domain_cache = kmem_cache_create("iommu_domain",
2825 sizeof(struct dmar_domain),
2826 0,
2827 SLAB_HWCACHE_ALIGN,
 2829					 NULL);
2830 if (!iommu_domain_cache) {
2831 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2832 ret = -ENOMEM;
2833 }
2834
2835 return ret;
2836}
2837
2838static inline int iommu_devinfo_cache_init(void)
2839{
2840 int ret = 0;
2841
2842 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2843 sizeof(struct device_domain_info),
2844 0,
2845 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002846 NULL);
2847 if (!iommu_devinfo_cache) {
2848 printk(KERN_ERR "Couldn't create devinfo cache\n");
2849 ret = -ENOMEM;
2850 }
2851
2852 return ret;
2853}
2854
2855static inline int iommu_iova_cache_init(void)
2856{
2857 int ret = 0;
2858
2859 iommu_iova_cache = kmem_cache_create("iommu_iova",
2860 sizeof(struct iova),
2861 0,
2862 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002863 NULL);
2864 if (!iommu_iova_cache) {
2865 printk(KERN_ERR "Couldn't create iova cache\n");
2866 ret = -ENOMEM;
2867 }
2868
2869 return ret;
2870}
2871
2872static int __init iommu_init_mempool(void)
2873{
2874 int ret;
2875 ret = iommu_iova_cache_init();
2876 if (ret)
2877 return ret;
2878
2879 ret = iommu_domain_cache_init();
2880 if (ret)
2881 goto domain_error;
2882
2883 ret = iommu_devinfo_cache_init();
2884 if (!ret)
2885 return ret;
2886
2887 kmem_cache_destroy(iommu_domain_cache);
2888domain_error:
2889 kmem_cache_destroy(iommu_iova_cache);
2890
2891 return -ENOMEM;
2892}
2893
2894static void __init iommu_exit_mempool(void)
2895{
2896 kmem_cache_destroy(iommu_devinfo_cache);
2897 kmem_cache_destroy(iommu_domain_cache);
2898 kmem_cache_destroy(iommu_iova_cache);
2899
2900}
2901
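/*
 * A sketch (not this file's actual helpers) of how the three caches
 * above are consumed; the real alloc_domain_mem()/free_domain_mem()
 * wrappers earlier in this file follow the same shape.  GFP_ATOMIC is
 * needed because allocations may happen under spinlocks in the DMA
 * mapping paths:
 *
 *	static inline void *example_alloc_domain(void)
 *	{
 *		return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
 *	}
 *
 *	static inline void example_free_domain(void *vaddr)
 *	{
 *		kmem_cache_free(iommu_domain_cache, vaddr);
 *	}
 */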
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002902static void __init init_no_remapping_devices(void)
2903{
2904 struct dmar_drhd_unit *drhd;
2905
2906 for_each_drhd_unit(drhd) {
2907 if (!drhd->include_all) {
2908 int i;
2909 for (i = 0; i < drhd->devices_cnt; i++)
2910 if (drhd->devices[i] != NULL)
2911 break;
 2912			/* ignore DMAR unit if no PCI devices exist under it */
2913 if (i == drhd->devices_cnt)
2914 drhd->ignored = 1;
2915 }
2916 }
2917
2918 if (dmar_map_gfx)
2919 return;
2920
2921 for_each_drhd_unit(drhd) {
2922 int i;
2923 if (drhd->ignored || drhd->include_all)
2924 continue;
2925
2926 for (i = 0; i < drhd->devices_cnt; i++)
2927 if (drhd->devices[i] &&
2928 !IS_GFX_DEVICE(drhd->devices[i]))
2929 break;
2930
2931 if (i < drhd->devices_cnt)
2932 continue;
2933
2934 /* bypass IOMMU if it is just for gfx devices */
2935 drhd->ignored = 1;
2936 for (i = 0; i < drhd->devices_cnt; i++) {
2937 if (!drhd->devices[i])
2938 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002939 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940 }
2941 }
2942}
2943
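/*
 * Illustrative, assuming the iommu_dummy() test defined earlier in this
 * file: devices stamped with DUMMY_DEVICE_DOMAIN_INFO above are picked
 * out again in the mapping paths with a comparison of this shape, so a
 * DMAR unit that only covers graphics devices costs nothing at runtime:
 *
 *	static int example_is_bypassed(struct pci_dev *pdev)
 *	{
 *		return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
 *	}
 */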
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002944#ifdef CONFIG_SUSPEND
2945static int init_iommu_hw(void)
2946{
2947 struct dmar_drhd_unit *drhd;
2948 struct intel_iommu *iommu = NULL;
2949
2950 for_each_active_iommu(iommu, drhd)
2951 if (iommu->qi)
2952 dmar_reenable_qi(iommu);
2953
2954 for_each_active_iommu(iommu, drhd) {
2955 iommu_flush_write_buffer(iommu);
2956
2957 iommu_set_root_entry(iommu);
2958
2959 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002960 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002961 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002962 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002963 iommu_disable_protect_mem_regions(iommu);
2964 iommu_enable_translation(iommu);
2965 }
2966
2967 return 0;
2968}
2969
2970static void iommu_flush_all(void)
2971{
2972 struct dmar_drhd_unit *drhd;
2973 struct intel_iommu *iommu;
2974
2975 for_each_active_iommu(iommu, drhd) {
2976 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002977 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002978 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002979 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002980 }
2981}
2982
2983static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2984{
2985 struct dmar_drhd_unit *drhd;
2986 struct intel_iommu *iommu = NULL;
2987 unsigned long flag;
2988
2989 for_each_active_iommu(iommu, drhd) {
2990 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2991 GFP_ATOMIC);
2992 if (!iommu->iommu_state)
2993 goto nomem;
2994 }
2995
2996 iommu_flush_all();
2997
2998 for_each_active_iommu(iommu, drhd) {
2999 iommu_disable_translation(iommu);
3000
3001 spin_lock_irqsave(&iommu->register_lock, flag);
3002
3003 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3004 readl(iommu->reg + DMAR_FECTL_REG);
3005 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3006 readl(iommu->reg + DMAR_FEDATA_REG);
3007 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3008 readl(iommu->reg + DMAR_FEADDR_REG);
3009 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3010 readl(iommu->reg + DMAR_FEUADDR_REG);
3011
3012 spin_unlock_irqrestore(&iommu->register_lock, flag);
3013 }
3014 return 0;
3015
3016nomem:
3017 for_each_active_iommu(iommu, drhd)
3018 kfree(iommu->iommu_state);
3019
3020 return -ENOMEM;
3021}
3022
3023static int iommu_resume(struct sys_device *dev)
3024{
3025 struct dmar_drhd_unit *drhd;
3026 struct intel_iommu *iommu = NULL;
3027 unsigned long flag;
3028
3029 if (init_iommu_hw()) {
 3030		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3031 return -EIO;
3032 }
3033
3034 for_each_active_iommu(iommu, drhd) {
3035
3036 spin_lock_irqsave(&iommu->register_lock, flag);
3037
3038 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3039 iommu->reg + DMAR_FECTL_REG);
3040 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3041 iommu->reg + DMAR_FEDATA_REG);
3042 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3043 iommu->reg + DMAR_FEADDR_REG);
3044 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3045 iommu->reg + DMAR_FEUADDR_REG);
3046
3047 spin_unlock_irqrestore(&iommu->register_lock, flag);
3048 }
3049
3050 for_each_active_iommu(iommu, drhd)
3051 kfree(iommu->iommu_state);
3052
3053 return 0;
3054}
3055
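/*
 * Only the four fault-event registers are snapshotted across suspend
 * (via the SR_DMAR_*_REG indices, declared alongside the other DMAR
 * register definitions, with MAX_SR_DMAR_REGS as the bound); everything
 * else is either reprogrammed by init_iommu_hw() on resume or lives in
 * memory-resident tables that survive suspend-to-RAM untouched, e.g.
 * the root, context and page tables that iommu_set_root_entry()
 * re-points the hardware at.
 */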
3056static struct sysdev_class iommu_sysclass = {
3057 .name = "iommu",
3058 .resume = iommu_resume,
3059 .suspend = iommu_suspend,
3060};
3061
3062static struct sys_device device_iommu = {
3063 .cls = &iommu_sysclass,
3064};
3065
3066static int __init init_iommu_sysfs(void)
3067{
3068 int error;
3069
3070 error = sysdev_class_register(&iommu_sysclass);
3071 if (error)
3072 return error;
3073
3074 error = sysdev_register(&device_iommu);
3075 if (error)
3076 sysdev_class_unregister(&iommu_sysclass);
3077
3078 return error;
3079}
3080
3081#else
3082static int __init init_iommu_sysfs(void)
3083{
3084 return 0;
3085}
3086#endif /* CONFIG_SUSPEND */
3087
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003088int __init intel_iommu_init(void)
3089{
3090 int ret = 0;
3091
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092 if (dmar_table_init())
3093 return -ENODEV;
3094
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003095 if (dmar_dev_scope_init())
3096 return -ENODEV;
3097
Suresh Siddha2ae21012008-07-10 11:16:43 -07003098 /*
3099 * Check the need for DMA-remapping initialization now.
 3100	 * The initialization above is also used by interrupt remapping.
3101 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003102 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003103 return -ENODEV;
3104
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003105 iommu_init_mempool();
3106 dmar_init_reserved_ranges();
3107
3108 init_no_remapping_devices();
3109
3110 ret = init_dmars();
3111 if (ret) {
3112 printk(KERN_ERR "IOMMU: dmar init failed\n");
3113 put_iova_domain(&reserved_iova_list);
3114 iommu_exit_mempool();
3115 return ret;
3116 }
3117 printk(KERN_INFO
3118 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3119
mark gross5e0d2a62008-03-04 15:22:08 -08003120 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003121 force_iommu = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003122
3123 if (!iommu_pass_through) {
3124 printk(KERN_INFO
3125 "Multi-level page-table translation for DMAR.\n");
3126 dma_ops = &intel_dma_ops;
3127 } else
3128 printk(KERN_INFO
3129 "DMAR: Pass through translation for DMAR.\n");
3130
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003131 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003132
3133 register_iommu(&intel_iommu_ops);
3134
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003135 return 0;
3136}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003137
Han, Weidong3199aa62009-02-26 17:31:12 +08003138static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3139 struct pci_dev *pdev)
3140{
3141 struct pci_dev *tmp, *parent;
3142
3143 if (!iommu || !pdev)
3144 return;
3145
3146 /* dependent device detach */
3147 tmp = pci_find_upstream_pcie_bridge(pdev);
3148 /* Secondary interface's bus number and devfn 0 */
3149 if (tmp) {
3150 parent = pdev->bus->self;
3151 while (parent != tmp) {
3152 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003153 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003154 parent = parent->bus->self;
3155 }
 3156		if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
3157 iommu_detach_dev(iommu,
3158 tmp->subordinate->number, 0);
3159 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003160 iommu_detach_dev(iommu, tmp->bus->number,
3161 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003162 }
3163}
3164
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003165static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003166 struct pci_dev *pdev)
3167{
3168 struct device_domain_info *info;
3169 struct intel_iommu *iommu;
3170 unsigned long flags;
3171 int found = 0;
3172 struct list_head *entry, *tmp;
3173
David Woodhouse276dbf992009-04-04 01:45:37 +01003174 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3175 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003176 if (!iommu)
3177 return;
3178
3179 spin_lock_irqsave(&device_domain_lock, flags);
3180 list_for_each_safe(entry, tmp, &domain->devices) {
3181 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf992009-04-04 01:45:37 +01003182 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003183 if (info->bus == pdev->bus->number &&
3184 info->devfn == pdev->devfn) {
3185 list_del(&info->link);
3186 list_del(&info->global);
3187 if (info->dev)
3188 info->dev->dev.archdata.iommu = NULL;
3189 spin_unlock_irqrestore(&device_domain_lock, flags);
3190
Yu Zhao93a23a72009-05-18 13:51:37 +08003191 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003192 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003193 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003194 free_devinfo_mem(info);
3195
3196 spin_lock_irqsave(&device_domain_lock, flags);
3197
3198 if (found)
3199 break;
3200 else
3201 continue;
3202 }
3203
 3204		/* if there are no other devices under the same iommu
 3205		 * owned by this domain, clear this iommu in iommu_bmp,
 3206		 * then update the iommu count and coherency
3207 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003208 if (iommu == device_to_iommu(info->segment, info->bus,
3209 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003210 found = 1;
3211 }
3212
3213 if (found == 0) {
3214 unsigned long tmp_flags;
3215 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3216 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3217 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003218 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003219 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3220 }
3221
3222 spin_unlock_irqrestore(&device_domain_lock, flags);
3223}
3224
3225static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3226{
3227 struct device_domain_info *info;
3228 struct intel_iommu *iommu;
3229 unsigned long flags1, flags2;
3230
3231 spin_lock_irqsave(&device_domain_lock, flags1);
3232 while (!list_empty(&domain->devices)) {
3233 info = list_entry(domain->devices.next,
3234 struct device_domain_info, link);
3235 list_del(&info->link);
3236 list_del(&info->global);
3237 if (info->dev)
3238 info->dev->dev.archdata.iommu = NULL;
3239
3240 spin_unlock_irqrestore(&device_domain_lock, flags1);
3241
Yu Zhao93a23a72009-05-18 13:51:37 +08003242 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003243 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003244 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003245 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003246
3247 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003248 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003249 */
3250 spin_lock_irqsave(&domain->iommu_lock, flags2);
3251 if (test_and_clear_bit(iommu->seq_id,
3252 &domain->iommu_bmp)) {
3253 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003254 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003255 }
3256 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3257
3258 free_devinfo_mem(info);
3259 spin_lock_irqsave(&device_domain_lock, flags1);
3260 }
3261 spin_unlock_irqrestore(&device_domain_lock, flags1);
3262}
3263
Weidong Han5e98c4b2008-12-08 23:03:27 +08003264/* domain id for virtual machines; it is never written into a context entry */
3265static unsigned long vm_domid;
3266
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003267static int vm_domain_min_agaw(struct dmar_domain *domain)
3268{
3269 int i;
3270 int min_agaw = domain->agaw;
3271
3272 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3273 for (; i < g_num_of_iommus; ) {
3274 if (min_agaw > g_iommus[i]->agaw)
3275 min_agaw = g_iommus[i]->agaw;
3276
3277 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3278 }
3279
3280 return min_agaw;
3281}
3282
Weidong Han5e98c4b2008-12-08 23:03:27 +08003283static struct dmar_domain *iommu_alloc_vm_domain(void)
3284{
3285 struct dmar_domain *domain;
3286
3287 domain = alloc_domain_mem();
3288 if (!domain)
3289 return NULL;
3290
3291 domain->id = vm_domid++;
3292 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3293 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3294
3295 return domain;
3296}
3297
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003298static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003299{
3300 int adjust_width;
3301
3302 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3303 spin_lock_init(&domain->mapping_lock);
3304 spin_lock_init(&domain->iommu_lock);
3305
3306 domain_reserve_special_ranges(domain);
3307
3308 /* calculate AGAW */
3309 domain->gaw = guest_width;
3310 adjust_width = guestwidth_to_adjustwidth(guest_width);
3311 domain->agaw = width_to_agaw(adjust_width);
3312
3313 INIT_LIST_HEAD(&domain->devices);
3314
3315 domain->iommu_count = 0;
3316 domain->iommu_coherency = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003317 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003318
3319 /* always allocate the top pgd */
3320 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3321 if (!domain->pgd)
3322 return -ENOMEM;
3323 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3324 return 0;
3325}
3326
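/*
 * Worked example for the AGAW calculation above, assuming the helpers
 * defined earlier in this file (30-bit base, 9 address bits resolved
 * per page-table level):
 *
 *	guest_width = 48
 *	agaw        = width_to_agaw(48) = (48 - 30) / 9 = 2
 *	levels      = agaw_to_level(2)  = 4
 *
 * i.e. a 48-bit guest address width selects a 4-level page table, and
 * guestwidth_to_adjustwidth() first rounds a width up so that it is
 * expressible in whole levels.
 */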
3327static void iommu_free_vm_domain(struct dmar_domain *domain)
3328{
3329 unsigned long flags;
3330 struct dmar_drhd_unit *drhd;
3331 struct intel_iommu *iommu;
3332 unsigned long i;
3333 unsigned long ndomains;
3334
3335 for_each_drhd_unit(drhd) {
3336 if (drhd->ignored)
3337 continue;
3338 iommu = drhd->iommu;
3339
3340 ndomains = cap_ndoms(iommu->cap);
3341 i = find_first_bit(iommu->domain_ids, ndomains);
3342 for (; i < ndomains; ) {
3343 if (iommu->domains[i] == domain) {
3344 spin_lock_irqsave(&iommu->lock, flags);
3345 clear_bit(i, iommu->domain_ids);
3346 iommu->domains[i] = NULL;
3347 spin_unlock_irqrestore(&iommu->lock, flags);
3348 break;
3349 }
3350 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3351 }
3352 }
3353}
3354
3355static void vm_domain_exit(struct dmar_domain *domain)
3356{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003357	/* Domain 0 is reserved, so don't process it */
3358 if (!domain)
3359 return;
3360
3361 vm_domain_remove_all_dev_info(domain);
3362 /* destroy iovas */
3363 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003364
3365 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003366 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003367
3368 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003369 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003370
3371 iommu_free_vm_domain(domain);
3372 free_domain_mem(domain);
3373}
3374
Joerg Roedel5d450802008-12-03 14:52:32 +01003375static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003376{
Joerg Roedel5d450802008-12-03 14:52:32 +01003377 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003378
Joerg Roedel5d450802008-12-03 14:52:32 +01003379 dmar_domain = iommu_alloc_vm_domain();
3380 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003381 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003382 "intel_iommu_domain_init: dmar_domain == NULL\n");
3383 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003384 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003385 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003386 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003387 "intel_iommu_domain_init() failed\n");
3388 vm_domain_exit(dmar_domain);
3389 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003390 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003391 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003392
Joerg Roedel5d450802008-12-03 14:52:32 +01003393 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003394}
Kay, Allen M38717942008-09-09 18:37:29 +03003395
Joerg Roedel5d450802008-12-03 14:52:32 +01003396static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003397{
Joerg Roedel5d450802008-12-03 14:52:32 +01003398 struct dmar_domain *dmar_domain = domain->priv;
3399
3400 domain->priv = NULL;
3401 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003402}
Kay, Allen M38717942008-09-09 18:37:29 +03003403
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003404static int intel_iommu_attach_device(struct iommu_domain *domain,
3405 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003406{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003407 struct dmar_domain *dmar_domain = domain->priv;
3408 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003409 struct intel_iommu *iommu;
3410 int addr_width;
3411 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003412 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003413
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003414	/* normally pdev's context is not yet mapped */
3415 if (unlikely(domain_context_mapped(pdev))) {
3416 struct dmar_domain *old_domain;
3417
3418 old_domain = find_domain(pdev);
3419 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003420 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3421 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3422 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003423 else
3424 domain_remove_dev_info(old_domain);
3425 }
3426 }
3427
David Woodhouse276dbf992009-04-04 01:45:37 +01003428 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3429 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003430 if (!iommu)
3431 return -ENODEV;
3432
3433 /* check if this iommu agaw is sufficient for max mapped address */
3434 addr_width = agaw_to_width(iommu->agaw);
3435 end = DOMAIN_MAX_ADDR(addr_width);
3436 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003437 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003438 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3439 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003440 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003441 return -EFAULT;
3442 }
3443
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003444 ret = domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003445 if (ret)
3446 return ret;
3447
Yu Zhao93a23a72009-05-18 13:51:37 +08003448 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003449 return ret;
3450}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003451
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003452static void intel_iommu_detach_device(struct iommu_domain *domain,
3453 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003454{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003455 struct dmar_domain *dmar_domain = domain->priv;
3456 struct pci_dev *pdev = to_pci_dev(dev);
3457
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003458 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003459}
Kay, Allen M38717942008-09-09 18:37:29 +03003460
Joerg Roedeldde57a22008-12-03 15:04:09 +01003461static int intel_iommu_map_range(struct iommu_domain *domain,
3462 unsigned long iova, phys_addr_t hpa,
3463 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003464{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003465 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003466 u64 max_addr;
3467 int addr_width;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003468 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003469 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003470
Joerg Roedeldde57a22008-12-03 15:04:09 +01003471 if (iommu_prot & IOMMU_READ)
3472 prot |= DMA_PTE_READ;
3473 if (iommu_prot & IOMMU_WRITE)
3474 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08003475 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3476 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003477
David Woodhouse163cc522009-06-28 00:51:17 +01003478 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003479 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003480 int min_agaw;
3481 u64 end;
3482
3483 /* check if minimum agaw is sufficient for mapped address */
Joerg Roedeldde57a22008-12-03 15:04:09 +01003484 min_agaw = vm_domain_min_agaw(dmar_domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003485 addr_width = agaw_to_width(min_agaw);
3486 end = DOMAIN_MAX_ADDR(addr_width);
3487 end = end & VTD_PAGE_MASK;
3488 if (end < max_addr) {
3489 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3490 "sufficient for the mapped address (%llx)\n",
3491 __func__, min_agaw, max_addr);
3492 return -EFAULT;
3493 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003494 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003495 }
David Woodhousead051222009-06-28 14:22:28 +01003496 /* Round up size to next multiple of PAGE_SIZE, if it and
3497 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003498 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003499 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3500 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003501 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003502}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003503
Joerg Roedeldde57a22008-12-03 15:04:09 +01003504static void intel_iommu_unmap_range(struct iommu_domain *domain,
3505 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003506{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003507 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003508
David Woodhouse163cc522009-06-28 00:51:17 +01003509 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3510 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003511
David Woodhouse163cc522009-06-28 00:51:17 +01003512 if (dmar_domain->max_addr == iova + size)
3513 dmar_domain->max_addr = iova;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003514}
Kay, Allen M38717942008-09-09 18:37:29 +03003515
Joerg Roedeld14d6572008-12-03 15:06:57 +01003516static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3517 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003518{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003519 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003520 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003521 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003522
David Woodhouseb026fd22009-06-28 10:37:25 +01003523 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003524 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003525 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003526
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003527 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003528}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003529
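/*
 * Note the lookup above is page-granular: dma_pte_addr() returns the
 * VT-d page frame base, so a caller needing an exact byte address must
 * re-apply the sub-page offset itself (illustrative):
 *
 *	phys_addr_t phys = iommu_iova_to_phys(domain, iova);
 *	if (phys)
 *		phys += iova & (VTD_PAGE_SIZE - 1);
 */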
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003530static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3531 unsigned long cap)
3532{
3533 struct dmar_domain *dmar_domain = domain->priv;
3534
3535 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3536 return dmar_domain->iommu_snooping;
3537
3538 return 0;
3539}
3540
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003541static struct iommu_ops intel_iommu_ops = {
3542 .domain_init = intel_iommu_domain_init,
3543 .domain_destroy = intel_iommu_domain_destroy,
3544 .attach_dev = intel_iommu_attach_device,
3545 .detach_dev = intel_iommu_detach_device,
3546 .map = intel_iommu_map_range,
3547 .unmap = intel_iommu_unmap_range,
3548 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003549 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003550};
David Woodhouse9af88142009-02-13 23:18:03 +00003551
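/*
 * A sketch of how a consumer such as KVM device assignment drives the
 * ops table above through the generic linux/iommu.h wrappers of this
 * era (error handling trimmed; wrapper names as assumed from that API):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */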
3552static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3553{
3554 /*
3555 * Mobile 4 Series Chipset neglects to set RWBF capability,
3556 * but needs it:
3557 */
3558 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3559 rwbf_quirk = 1;
3560}
3561
3562DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
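/*
 * Other chipsets with the same RWBF erratum would be wired up the same
 * way, one fixup line per PCI device ID (the ID below is hypothetical,
 * for illustration only):
 *
 *	DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
 */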