/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
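
/*
 * Illustration (added comment): ~0xFFFUL leaves every bit from 12 upwards
 * set, and each set bit k in the bitmap advertises page size 2^k. So bit 12
 * claims 4KiB, bit 13 8KiB, bit 21 2MiB, bit 30 1GiB, and so on -- every
 * power-of-two size of at least 4KiB appears supported to the IOMMU core.
 */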

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
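
/*
 * Worked example (added comment): for the default 48-bit domain width,
 * width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4, i.e. a
 * four-level page table. AGAW 0 is the base case: a two-level table
 * covering 30 bits of address space (2 * 9 bits of index + 12 bits of
 * page offset).
 */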

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
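
/*
 * Unit note (added comment): "DMA pfns" are in units of VTD_PAGE_SIZE,
 * which the VT-d architecture fixes at 4KiB, while "MM pfns" are in units
 * of the kernel's PAGE_SIZE. On x86 the two shifts are equal and these
 * conversions compile away; with larger kernel pages, one MM page spans
 * several VT-d pages.
 */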

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
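
/*
 * Layout note (added comment, paraphrasing the VT-d spec): the root table
 * holds 256 entries, one per PCI bus number. Each present root entry points
 * to a page-sized context table of 256 entries, one per devfn, so any
 * (bus, devfn) source-id resolves to exactly one context entry.
 */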

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
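
/*
 * Rationale (added comment): page-table pages are one VTD_PAGE_SIZE page,
 * naturally aligned, so a PTE pointer whose low 12 bits are zero sits at
 * slot 0 of its table. The range loops below use this to notice when they
 * have stepped off the end of one table page and must look up the next.
 */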

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;
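
/*
 * Design note (added comment): rather than flushing the IOTLB on every
 * unmap, freed IOVAs are parked in deferred_flush and released in batches
 * when unmap_timer fires or a table reaches HIGH_WATER_MARK entries.
 * Booting with intel_iommu=strict (parsed below) disables the batching in
 * favor of a synchronous flush per unmap.
 */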

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
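
/*
 * Usage example (added comment): the options above are comma-separated on
 * the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * which enables the IOMMU, forces a synchronous IOTLB flush on every
 * unmap, and turns off superpage support.
 */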

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
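
/*
 * Walk summary (added comment): pfn_to_dma_pte() descends from the domain's
 * pgd, consuming LEVEL_STRIDE (9) bits of the pfn per level, and lazily
 * allocates missing intermediate tables. The lock-free cmpxchg64() above
 * lets concurrent mappers race to install the same table page; the loser
 * frees its freshly allocated page and continues through the winner's.
 */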


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
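
/*
 * Granularity note (added comment): the DMA_TLB_*_FLUSH types correspond to
 * the VT-d IOTLB invalidation granularities: global, domain-selective, and
 * page-selective (an aligned 2^size_order range within one domain). Hardware
 * may legally invalidate more than was requested, which is why a mismatch
 * between IIRG (requested) and IAIG (actual) above is only pr_debug(), while
 * IAIG == 0 means the invalidation did not complete and is a real error.
 */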

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page count to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * a flush. However, the device IOTLB doesn't need to be flushed in this
	 * case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
		 ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


1287static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001288static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001289
void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

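/* Allocate a bare dmar_domain, not yet attached to any IOMMU (nid = -1). */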
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

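/*
 * Claim a free domain id on @iommu for @domain and record the association
 * in both directions (iommu->domain_ids bit and domain->iommu_bmp bit).
 */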
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

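/* Release @domain's id on @iommu and drop the cross-references again. */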
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

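/*
 * Build the global list of IOVA ranges that must never be handed out for
 * DMA: the IOAPIC window and every PCI MMIO resource in the system.
 */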
static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

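/*
 * Round a guest address width up to the next page-table level boundary
 * (12 + 9*n bits, capped at 64). For example, gaw = 48 stays 48 since
 * (48 - 12) % 9 == 0, while gaw = 40 is rounded up to 48.
 */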
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

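/*
 * Initialize a freshly allocated domain: its IOVA allocator, address
 * width (AGAW), capability flags derived from the IOMMU, and the
 * top-level page directory.
 */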
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

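/*
 * Tear a domain down: drop its device info, free its IOVAs and page
 * tables, detach it from every IOMMU it is attached to, and free it.
 */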
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

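/*
 * Program the context entry for (@segment, @bus, @devfn) to point at
 * @domain's page tables (or pass-through), then flush the context and
 * IOTLB caches as Caching Mode requires.
 */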
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for an iommu whose
		 * agaw is smaller than the default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

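/*
 * Context-map the device itself and, if it sits behind a PCIe-to-PCI
 * bridge, every bridge on the path, since any of them may appear as the
 * request's source-id.
 */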
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

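/*
 * Check whether context entries are already present for the device and,
 * where relevant, for the bridges upstream of it.
 */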
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
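/*
 * Example with 4KiB pages: host_addr = 0x1800 and size = 0x1000 leave an
 * in-page offset of 0x800, so PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT
 * yields 2 VTD pages.
 */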
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

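/*
 * Core mapping loop: install PTEs for @nr_pages starting at @iov_pfn,
 * taking physical addresses either from @sg or from @phys_pfn, and
 * using superpages whenever alignment and run length allow.
 */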
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

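/*
 * Clear the device's context entry and flush the context and IOTLB
 * caches globally, so the device can no longer use its old mappings.
 */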
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device's
 * device_domain_info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/*
 * Find the domain for this device, allocating and fully initializing
 * one (including bridge handling) if none exists yet.
 */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else beat us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

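/*
 * iommu_identity_mapping is a bitmask selecting which classes of devices
 * are placed in the static-identity (1:1) domain.
 */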
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

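/*
 * Reserve the IOVA range and install a 1:1 (virtual == physical) mapping
 * for [start, end] in @domain.
 */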
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

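/*
 * Set up a 1:1 mapping of [start, end] (typically an RMRR) for @pdev,
 * sanity-checking the range against broken BIOS tables first.
 */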
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up to
	   start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

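/*
 * Create the static-identity (si) domain and, unless hardware
 * pass-through is in use, 1:1-map every usable memory range into it.
 */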
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

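/*
 * Bind @pdev to @domain: allocate and link its device_domain_info, then
 * program the context entry; unwind on failure.
 */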
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

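/* Check whether any RMRR in the DMAR table covers this device. */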
static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}

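/*
 * Decide whether a device belongs in the static-identity domain, based
 * on RMRR usage, device class, and (after boot) its DMA addressing
 * limits.
 */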
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}

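/*
 * Walk all PCI devices and put each eligible one into the static
 * identity domain, using hardware pass-through context entries when
 * supported.
 */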
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					  hw ? CONTEXT_TT_PASS_THROUGH :
					       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

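/*
 * Boot-time DMAR initialization: set up each DRHD unit, choose the
 * invalidation mechanism, create identity/RMRR/ISA mappings, and
 * finally enable translation.
 */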
Joseph Cihulab7792602011-05-03 00:08:37 -07002450static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002451{
2452 struct dmar_drhd_unit *drhd;
2453 struct dmar_rmrr_unit *rmrr;
2454 struct pci_dev *pdev;
2455 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002456 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002457
2458 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002459 * for each drhd
2460 * allocate root
2461 * initialize and program root entry to not present
2462 * endfor
2463 */
2464 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002465 /*
2466 * lock not needed as this is only incremented in the single
2467 * threaded kernel __init code path all other access are read
2468 * only
2469 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002470 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2471 g_num_of_iommus++;
2472 continue;
2473 }
2474 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2475 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002476 }
2477
Weidong Hand9630fe2008-12-08 11:06:32 +08002478 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2479 GFP_KERNEL);
2480 if (!g_iommus) {
2481 printk(KERN_ERR "Allocating global iommu array failed\n");
2482 ret = -ENOMEM;
2483 goto error;
2484 }
2485
mark gross80b20dd2008-04-18 13:53:58 -07002486 deferred_flush = kzalloc(g_num_of_iommus *
2487 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2488 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002489 ret = -ENOMEM;
2490 goto error;
2491 }
2492
mark gross5e0d2a62008-03-04 15:22:08 -08002493 for_each_drhd_unit(drhd) {
2494 if (drhd->ignored)
2495 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002496
2497 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002498 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002499
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002500 ret = iommu_init_domains(iommu);
2501 if (ret)
2502 goto error;
2503
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002504 /*
2505 * TBD:
2506 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002507 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002508 */
2509 ret = iommu_alloc_root_entry(iommu);
2510 if (ret) {
2511 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2512 goto error;
2513 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002514 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002515 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002516 }
2517
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002518 /*
2519 * Start from the sane iommu hardware state.
2520 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002521 for_each_drhd_unit(drhd) {
2522 if (drhd->ignored)
2523 continue;
2524
2525 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002526
2527 /*
2528 * If the queued invalidation is already initialized by us
2529 * (for example, while enabling interrupt-remapping) then
2530 * we got the things already rolling from a sane state.
2531 */
2532 if (iommu->qi)
2533 continue;
2534
2535 /*
2536 * Clear any previous faults.
2537 */
2538 dmar_fault(-1, iommu);
2539 /*
2540 * Disable queued invalidation if supported and already enabled
2541 * before OS handover.
2542 */
2543 dmar_disable_qi(iommu);
2544 }
2545
2546 for_each_drhd_unit(drhd) {
2547 if (drhd->ignored)
2548 continue;
2549
2550 iommu = drhd->iommu;
2551
Youquan Songa77b67d2008-10-16 16:31:56 -07002552 if (dmar_enable_qi(iommu)) {
2553 /*
2554 * Queued Invalidate not enabled, use Register Based
2555 * Invalidate
2556 */
2557 iommu->flush.flush_context = __iommu_flush_context;
2558 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002559 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002560 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002561 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002562 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002563 } else {
2564 iommu->flush.flush_context = qi_flush_context;
2565 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002566 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002567 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002568 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002569 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002570 }
2571 }
2572
David Woodhouse19943b02009-08-04 16:19:20 +01002573 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002574 iommu_identity_mapping |= IDENTMAP_ALL;
2575
Suresh Siddhad3f13812011-08-23 17:05:25 -07002576#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002577 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002578#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002579
2580 check_tylersburg_isoch();
2581
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002582 /*
2583 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002584 * identity mappings for rmrr, gfx, and isa and may fall back to static
2585 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002586 */
David Woodhouse19943b02009-08-04 16:19:20 +01002587 if (iommu_identity_mapping) {
2588 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2589 if (ret) {
2590 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2591 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002592 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002593 }
David Woodhouse19943b02009-08-04 16:19:20 +01002594 /*
2595 * For each rmrr
2596 * for each dev attached to rmrr
2597 * do
2598 * locate drhd for dev, alloc domain for dev
2599 * allocate free domain
2600 * allocate page table entries for rmrr
2601 * if context not allocated for bus
2602 * allocate and init context
2603 * set present in root table for this bus
2604 * init context with domain, translation etc
2605 * endfor
2606 * endfor
2607 */
2608 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2609 for_each_rmrr_units(rmrr) {
2610 for (i = 0; i < rmrr->devices_cnt; i++) {
2611 pdev = rmrr->devices[i];
2612 /*
2613 * some BIOS lists non-exist devices in DMAR
2614 * table.
2615 */
2616 if (!pdev)
2617 continue;
2618 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2619 if (ret)
2620 printk(KERN_ERR
2621 "IOMMU: mapping reserved region failed\n");
2622 }
2623 }
2624
2625 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002626
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002627 /*
2628 * for each drhd
2629 * enable fault log
2630 * global invalidate context cache
2631 * global invalidate iotlb
2632 * enable translation
2633 */
2634 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002635 if (drhd->ignored) {
2636 /*
2637 * we always have to disable PMRs or DMA may fail on
2638 * this device
2639 */
2640 if (force_on)
2641 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002643 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002645
2646 iommu_flush_write_buffer(iommu);
2647
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002648 ret = dmar_set_interrupt(iommu);
2649 if (ret)
2650 goto error;
2651
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002652 iommu_set_root_entry(iommu);
2653
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002654 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002655 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002656
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002657 ret = iommu_enable_translation(iommu);
2658 if (ret)
2659 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002660
2661 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002662 }
2663
2664 return 0;
2665error:
2666 for_each_drhd_unit(drhd) {
2667 if (drhd->ignored)
2668 continue;
2669 iommu = drhd->iommu;
2670 free_iommu(iommu);
2671 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002672 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002673 return ret;
2674}
2675
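/*
 * Everything from here down implements the kernel DMA API (see
 * intel_dma_ops at the end of this section): allocate an IOVA from the
 * device's domain, install page-table entries, and flush the IOTLB or
 * write buffer as the hardware requires.
 */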
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002676/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002677static struct iova *intel_alloc_iova(struct device *dev,
2678 struct dmar_domain *domain,
2679 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002680{
2681 struct pci_dev *pdev = to_pci_dev(dev);
2682 struct iova *iova = NULL;
2683
David Woodhouse875764d2009-06-28 21:20:51 +01002684 /* Restrict dma_mask to the width that the iommu can handle */
2685 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2686
2687 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002688 /*
2689		 * First try to allocate an io virtual address within
Yang Hongyang284901a2009-04-06 19:01:15 -07002690		 * DMA_BIT_MASK(32); if that fails, fall back to allocating
Joe Perches36098012007-12-17 11:40:11 -08002691		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002692 */
David Woodhouse875764d2009-06-28 21:20:51 +01002693 iova = alloc_iova(&domain->iovad, nrpages,
2694 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2695 if (iova)
2696 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002697 }
David Woodhouse875764d2009-06-28 21:20:51 +01002698 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2699 if (unlikely(!iova)) {
2700		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2701 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002702 return NULL;
2703 }
2704
2705 return iova;
2706}
2707
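/*
 * Find (or lazily create) the DMA domain for a device on first use, and
 * make sure its context entry has been programmed.
 */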
David Woodhouse147202a2009-07-07 19:43:20 +01002708static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002709{
2710 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002711 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002712
2713 domain = get_domain_for_dev(pdev,
2714 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2715 if (!domain) {
2716 printk(KERN_ERR
2717 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002718 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002719 }
2720
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002721	/* make sure the context mapping is OK */
Weidong Han5331fe62008-12-08 23:00:00 +08002722 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002723 ret = domain_context_mapping(domain, pdev,
2724 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002725 if (ret) {
2726 printk(KERN_ERR
2727 "Domain context map for %s failed",
2728 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002729 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002730 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002731 }
2732
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002733 return domain;
2734}
2735
David Woodhouse147202a2009-07-07 19:43:20 +01002736static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2737{
2738 struct device_domain_info *info;
2739
2740 /* No lock here, assumes no domain exit in normal case */
2741 info = dev->dev.archdata.iommu;
2742 if (likely(info))
2743 return info->domain;
2744
2745 return __get_valid_domain_for_dev(dev);
2746}
2747
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002748static int iommu_dummy(struct pci_dev *pdev)
2749{
2750 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2751}
2752
2753/* Check if the pdev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002754static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755{
David Woodhouse73676832009-07-04 14:08:36 +01002756 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002757 int found;
2758
David Woodhouse73676832009-07-04 14:08:36 +01002759 if (unlikely(dev->bus != &pci_bus_type))
2760 return 1;
2761
2762 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002763 if (iommu_dummy(pdev))
2764 return 1;
2765
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002767 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768
2769 found = identity_mapping(pdev);
2770 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002771 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002772 return 1;
2773 else {
2774 /*
2775			 * A 32-bit DMA device is removed from si_domain and
2776			 * falls back to non-identity mapping.
2777 */
2778 domain_remove_one_dev_info(si_domain, pdev);
2779 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2780 pci_name(pdev));
2781 return 0;
2782 }
2783 } else {
2784 /*
2785		 * A 64-bit DMA device detached from a VM is put back into
2786		 * si_domain for identity mapping.
2787 */
David Woodhouse6941af22009-07-04 18:24:27 +01002788 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002789 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002790 ret = domain_add_dev_info(si_domain, pdev,
2791 hw_pass_through ?
2792 CONTEXT_TT_PASS_THROUGH :
2793 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002794 if (!ret) {
2795 printk(KERN_INFO "64bit %s uses identity mapping\n",
2796 pci_name(pdev));
2797 return 1;
2798 }
2799 }
2800 }
2801
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002802 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002803}
2804
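/*
 * Core map routine shared by intel_map_page() and intel_alloc_coherent():
 * pick the device's domain, carve an IOVA out of it, wire up the page
 * tables, then flush the IOTLB (caching mode) or just the write buffer
 * (real hardware).
 */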
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002805static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2806 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002807{
2808 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002809 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002810 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002811 struct iova *iova;
2812 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002813 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002814 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002815 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002816
2817 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002818
David Woodhouse73676832009-07-04 14:08:36 +01002819 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002820 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002821
2822 domain = get_valid_domain_for_dev(pdev);
2823 if (!domain)
2824 return 0;
2825
Weidong Han8c11e792008-12-08 15:29:22 +08002826 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002827 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002828
Mike Travisc681d0b2011-05-28 13:15:05 -05002829 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002830 if (!iova)
2831 goto error;
2832
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002833 /*
2834 * Check if DMAR supports zero-length reads on write only
2835 * mappings..
2836 */
2837 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002838 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002839 prot |= DMA_PTE_READ;
2840 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2841 prot |= DMA_PTE_WRITE;
2842 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002843	 * [paddr, paddr + size) might cover a partial page, so map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002844	 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002845	 * might have two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002846	 * this is not a big problem
2847 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002848 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002849 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002850 if (ret)
2851 goto error;
2852
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002853 /* it's a non-present to present mapping. Only flush if caching mode */
2854 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002855 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002856 else
Weidong Han8c11e792008-12-08 15:29:22 +08002857 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002858
David Woodhouse03d6a242009-06-28 15:33:46 +01002859 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2860 start_paddr += paddr & ~PAGE_MASK;
2861 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002862
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002863error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002864 if (iova)
2865 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002866	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002867 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002868 return 0;
2869}
2870
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002871static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2872 unsigned long offset, size_t size,
2873 enum dma_data_direction dir,
2874 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002875{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002876 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2877 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002878}
2879
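/*
 * Deferred-unmap machinery: instead of flushing the IOTLB on every unmap,
 * freed IOVAs are parked in the per-IOMMU deferred_flush[] queues and
 * released in batches, either when a queue reaches HIGH_WATER_MARK or
 * when the 10ms unmap_timer fires (see add_unmap() below).
 */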
mark gross5e0d2a62008-03-04 15:22:08 -08002880static void flush_unmaps(void)
2881{
mark gross80b20dd2008-04-18 13:53:58 -07002882 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002883
mark gross5e0d2a62008-03-04 15:22:08 -08002884 timer_on = 0;
2885
2886 /* just flush them all */
2887 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002888 struct intel_iommu *iommu = g_iommus[i];
2889 if (!iommu)
2890 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002891
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002892 if (!deferred_flush[i].next)
2893 continue;
2894
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002895 /* In caching mode, global flushes turn emulation expensive */
2896 if (!cap_caching_mode(iommu->cap))
2897 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002898 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002899 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002900 unsigned long mask;
2901 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002902 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002903
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002904 /* On real hardware multiple invalidations are expensive */
2905 if (cap_caching_mode(iommu->cap))
2906 iommu_flush_iotlb_psi(iommu, domain->id,
2907 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2908 else {
2909 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2910 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2911 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2912 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002913 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002914 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002915 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002916 }
2917
mark gross5e0d2a62008-03-04 15:22:08 -08002918 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002919}
2920
2921static void flush_unmaps_timeout(unsigned long data)
2922{
mark gross80b20dd2008-04-18 13:53:58 -07002923 unsigned long flags;
2924
2925 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002926 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002927 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002928}
2929
2930static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2931{
2932 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002933 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002934 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002935
2936 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002937 if (list_size == HIGH_WATER_MARK)
2938 flush_unmaps();
2939
Weidong Han8c11e792008-12-08 15:29:22 +08002940 iommu = domain_get_iommu(dom);
2941 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002942
mark gross80b20dd2008-04-18 13:53:58 -07002943 next = deferred_flush[iommu_id].next;
2944 deferred_flush[iommu_id].domain[next] = dom;
2945 deferred_flush[iommu_id].iova[next] = iova;
2946 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002947
2948 if (!timer_on) {
2949 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2950 timer_on = 1;
2951 }
2952 list_size++;
2953 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2954}
2955
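/*
 * Unmap path: look up the IOVA that intel_map_page() handed out, clear
 * the page-table range it covers, and either flush the IOTLB right away
 * (intel_iommu_strict) or defer the flush via add_unmap().
 */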
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002956static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2957 size_t size, enum dma_data_direction dir,
2958 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959{
2960 struct pci_dev *pdev = to_pci_dev(dev);
2961 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002962 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002963 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002964 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965
David Woodhouse73676832009-07-04 14:08:36 +01002966 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002968
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002969 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002970 BUG_ON(!domain);
2971
Weidong Han8c11e792008-12-08 15:29:22 +08002972 iommu = domain_get_iommu(domain);
2973
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002974 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002975 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2976 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002977 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002978
David Woodhoused794dc92009-06-28 00:27:49 +01002979 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2980 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002981
David Woodhoused794dc92009-06-28 00:27:49 +01002982 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2983 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002984
2985 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002986 dma_pte_clear_range(domain, start_pfn, last_pfn);
2987
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002988 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002989 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2990
mark gross5e0d2a62008-03-04 15:22:08 -08002991 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002992 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002993 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002994 /* free iova */
2995 __free_iova(&domain->iovad, iova);
2996 } else {
2997 add_unmap(domain, iova);
2998 /*
2999		 * queue up the release of the unmap; batching saves roughly
3000		 * 1/6th of the cpu time otherwise used up by the iotlb flush
3001		 * operation...
3001 */
mark gross5e0d2a62008-03-04 15:22:08 -08003002 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003003}
3004
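/*
 * Coherent allocations are ordinary page allocations mapped
 * DMA_BIDIRECTIONAL through __intel_map_single(); GFP_DMA/GFP_DMA32 are
 * only needed when the device bypasses the IOMMU and has a narrow
 * coherent mask.
 */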
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003005static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003006 dma_addr_t *dma_handle, gfp_t flags,
3007 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003008{
3009 void *vaddr;
3010 int order;
3011
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003012 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003013 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003014
3015 if (!iommu_no_mapping(hwdev))
3016 flags &= ~(GFP_DMA | GFP_DMA32);
3017 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3018 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3019 flags |= GFP_DMA;
3020 else
3021 flags |= GFP_DMA32;
3022 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023
3024 vaddr = (void *)__get_free_pages(flags, order);
3025 if (!vaddr)
3026 return NULL;
3027 memset(vaddr, 0, size);
3028
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003029 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3030 DMA_BIDIRECTIONAL,
3031 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003032 if (*dma_handle)
3033 return vaddr;
3034 free_pages((unsigned long)vaddr, order);
3035 return NULL;
3036}
3037
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003038static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003039 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003040{
3041 int order;
3042
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003043 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003044 order = get_order(size);
3045
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003046 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003047 free_pages((unsigned long)vaddr, order);
3048}
3049
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003050static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3051 int nelems, enum dma_data_direction dir,
3052 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003053{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054 struct pci_dev *pdev = to_pci_dev(hwdev);
3055 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003056 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003057 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003058 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003059
David Woodhouse73676832009-07-04 14:08:36 +01003060 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061 return;
3062
3063 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003064 BUG_ON(!domain);
3065
3066 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003067
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003068 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003069 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3070 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003071 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003072
David Woodhoused794dc92009-06-28 00:27:49 +01003073 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3074 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003075
3076 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003077 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003078
David Woodhoused794dc92009-06-28 00:27:49 +01003079 /* free page tables */
3080 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3081
David Woodhouseacea0012009-07-14 01:55:11 +01003082 if (intel_iommu_strict) {
3083 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003084 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003085 /* free iova */
3086 __free_iova(&domain->iovad, iova);
3087 } else {
3088 add_unmap(domain, iova);
3089 /*
3090		 * queue up the release of the unmap; batching saves roughly
3091		 * 1/6th of the cpu time otherwise used up by the iotlb flush
3092		 * operation...
3092 */
3093 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003094}
3095
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003097 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003098{
3099 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003100 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003101
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003102 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003103 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003104 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003105 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003106 }
3107 return nelems;
3108}
3109
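/*
 * Scatterlist mapping allocates one IOVA range large enough for all
 * segments and maps them back to back, so the device sees a single
 * contiguous DMA window even for a fragmented buffer.
 */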
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003110static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3111 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003112{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003113 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003114 struct pci_dev *pdev = to_pci_dev(hwdev);
3115 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003116 size_t size = 0;
3117 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003118 struct iova *iova = NULL;
3119 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003120 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003121 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003122 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003123
3124 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003125 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003126 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003127
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003128 domain = get_valid_domain_for_dev(pdev);
3129 if (!domain)
3130 return 0;
3131
Weidong Han8c11e792008-12-08 15:29:22 +08003132 iommu = domain_get_iommu(domain);
3133
David Woodhouseb536d242009-06-28 14:49:31 +01003134 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003135 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003136
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003137 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3138 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003139 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003140 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003141 return 0;
3142 }
3143
3144 /*
3145 * Check if DMAR supports zero-length reads on write only
3146 * mappings..
3147 */
3148 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003149 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003150 prot |= DMA_PTE_READ;
3151 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3152 prot |= DMA_PTE_WRITE;
3153
David Woodhouseb536d242009-06-28 14:49:31 +01003154 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003155
Fenghua Yuf5329592009-08-04 15:09:37 -07003156 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003157 if (unlikely(ret)) {
3158 /* clear the page */
3159 dma_pte_clear_range(domain, start_vpfn,
3160 start_vpfn + size - 1);
3161 /* free page tables */
3162 dma_pte_free_pagetable(domain, start_vpfn,
3163 start_vpfn + size - 1);
3164 /* free iova */
3165 __free_iova(&domain->iovad, iova);
3166 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003167 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003168
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003169 /* it's a non-present to present mapping. Only flush if caching mode */
3170 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003171 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003172 else
Weidong Han8c11e792008-12-08 15:29:22 +08003173 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003174
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003175 return nelems;
3176}
3177
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003178static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3179{
3180 return !dma_addr;
3181}
3182
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003183struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003184 .alloc = intel_alloc_coherent,
3185 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003186 .map_sg = intel_map_sg,
3187 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003188 .map_page = intel_map_page,
3189 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003190 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003191};
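/*
 * Drivers never call these entry points directly: once intel_iommu_init()
 * has pointed dma_ops at intel_dma_ops, e.g. a driver's
 * dma_map_single(dev, ptr, size, DMA_TO_DEVICE) lands in intel_map_page()
 * via the generic DMA API.
 */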
3192
3193static inline int iommu_domain_cache_init(void)
3194{
3195 int ret = 0;
3196
3197 iommu_domain_cache = kmem_cache_create("iommu_domain",
3198 sizeof(struct dmar_domain),
3199 0,
3200 SLAB_HWCACHE_ALIGN,
3202					 NULL);
3203 if (!iommu_domain_cache) {
3204 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3205 ret = -ENOMEM;
3206 }
3207
3208 return ret;
3209}
3210
3211static inline int iommu_devinfo_cache_init(void)
3212{
3213 int ret = 0;
3214
3215 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3216 sizeof(struct device_domain_info),
3217 0,
3218 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003219 NULL);
3220 if (!iommu_devinfo_cache) {
3221 printk(KERN_ERR "Couldn't create devinfo cache\n");
3222 ret = -ENOMEM;
3223 }
3224
3225 return ret;
3226}
3227
3228static inline int iommu_iova_cache_init(void)
3229{
3230 int ret = 0;
3231
3232 iommu_iova_cache = kmem_cache_create("iommu_iova",
3233 sizeof(struct iova),
3234 0,
3235 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003236 NULL);
3237 if (!iommu_iova_cache) {
3238 printk(KERN_ERR "Couldn't create iova cache\n");
3239 ret = -ENOMEM;
3240 }
3241
3242 return ret;
3243}
3244
3245static int __init iommu_init_mempool(void)
3246{
3247 int ret;
3248 ret = iommu_iova_cache_init();
3249 if (ret)
3250 return ret;
3251
3252 ret = iommu_domain_cache_init();
3253 if (ret)
3254 goto domain_error;
3255
3256 ret = iommu_devinfo_cache_init();
3257 if (!ret)
3258 return ret;
3259
3260 kmem_cache_destroy(iommu_domain_cache);
3261domain_error:
3262 kmem_cache_destroy(iommu_iova_cache);
3263
3264 return -ENOMEM;
3265}
3266
3267static void __init iommu_exit_mempool(void)
3268{
3269 kmem_cache_destroy(iommu_devinfo_cache);
3270 kmem_cache_destroy(iommu_domain_cache);
3271 kmem_cache_destroy(iommu_iova_cache);
3272
3273}
3274
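/*
 * Quirk: some BIOSes assign the integrated QuickData (IOAT) DMA engine
 * to the wrong DMAR unit, so read the VT-d BAR from config space and
 * mark the device as untranslated if the DMAR table is lying.
 */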
Dan Williams556ab452010-07-23 15:47:56 -07003275static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3276{
3277 struct dmar_drhd_unit *drhd;
3278 u32 vtbar;
3279 int rc;
3280
3281 /* We know that this device on this chipset has its own IOMMU.
3282 * If we find it under a different IOMMU, then the BIOS is lying
3283 * to us. Hope that the IOMMU for this device is actually
3284 * disabled, and it needs no translation...
3285 */
3286 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3287 if (rc) {
3288 /* "can't" happen */
3289 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3290 return;
3291 }
3292 vtbar &= 0xffff0000;
3293
3294	/* we know that this iommu should be at offset 0xa000 from vtbar */
3295 drhd = dmar_find_matched_drhd_unit(pdev);
3296 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3297 TAINT_FIRMWARE_WORKAROUND,
3298 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3299 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3300}
3301DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3302
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003303static void __init init_no_remapping_devices(void)
3304{
3305 struct dmar_drhd_unit *drhd;
3306
3307 for_each_drhd_unit(drhd) {
3308 if (!drhd->include_all) {
3309 int i;
3310 for (i = 0; i < drhd->devices_cnt; i++)
3311 if (drhd->devices[i] != NULL)
3312 break;
3313 /* ignore DMAR unit if no pci devices exist */
3314 if (i == drhd->devices_cnt)
3315 drhd->ignored = 1;
3316 }
3317 }
3318
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003319 for_each_drhd_unit(drhd) {
3320 int i;
3321 if (drhd->ignored || drhd->include_all)
3322 continue;
3323
3324 for (i = 0; i < drhd->devices_cnt; i++)
3325 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003326 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327 break;
3328
3329 if (i < drhd->devices_cnt)
3330 continue;
3331
David Woodhousec0771df2011-10-14 20:59:46 +01003332 /* This IOMMU has *only* gfx devices. Either bypass it or
3333 set the gfx_mapped flag, as appropriate */
3334 if (dmar_map_gfx) {
3335 intel_iommu_gfx_mapped = 1;
3336 } else {
3337 drhd->ignored = 1;
3338 for (i = 0; i < drhd->devices_cnt; i++) {
3339 if (!drhd->devices[i])
3340 continue;
3341 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3342 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003343 }
3344 }
3345}
3346
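/*
 * Suspend/resume support: only the fault-event registers are saved
 * across S3; everything else (root entry, context cache, IOTLB) is
 * rebuilt from scratch by init_iommu_hw() on resume.
 */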
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003347#ifdef CONFIG_SUSPEND
3348static int init_iommu_hw(void)
3349{
3350 struct dmar_drhd_unit *drhd;
3351 struct intel_iommu *iommu = NULL;
3352
3353 for_each_active_iommu(iommu, drhd)
3354 if (iommu->qi)
3355 dmar_reenable_qi(iommu);
3356
Joseph Cihulab7792602011-05-03 00:08:37 -07003357 for_each_iommu(iommu, drhd) {
3358 if (drhd->ignored) {
3359 /*
3360 * we always have to disable PMRs or DMA may fail on
3361 * this device
3362 */
3363 if (force_on)
3364 iommu_disable_protect_mem_regions(iommu);
3365 continue;
3366 }
3367
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003368 iommu_flush_write_buffer(iommu);
3369
3370 iommu_set_root_entry(iommu);
3371
3372 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003373 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003374 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003375 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003376 if (iommu_enable_translation(iommu))
3377 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003378 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003379 }
3380
3381 return 0;
3382}
3383
3384static void iommu_flush_all(void)
3385{
3386 struct dmar_drhd_unit *drhd;
3387 struct intel_iommu *iommu;
3388
3389 for_each_active_iommu(iommu, drhd) {
3390 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003391 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003392 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003393 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003394 }
3395}
3396
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003397static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003398{
3399 struct dmar_drhd_unit *drhd;
3400 struct intel_iommu *iommu = NULL;
3401 unsigned long flag;
3402
3403 for_each_active_iommu(iommu, drhd) {
3404 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3405 GFP_ATOMIC);
3406 if (!iommu->iommu_state)
3407 goto nomem;
3408 }
3409
3410 iommu_flush_all();
3411
3412 for_each_active_iommu(iommu, drhd) {
3413 iommu_disable_translation(iommu);
3414
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003415 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003416
3417 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3418 readl(iommu->reg + DMAR_FECTL_REG);
3419 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3420 readl(iommu->reg + DMAR_FEDATA_REG);
3421 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3422 readl(iommu->reg + DMAR_FEADDR_REG);
3423 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3424 readl(iommu->reg + DMAR_FEUADDR_REG);
3425
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003426 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003427 }
3428 return 0;
3429
3430nomem:
3431 for_each_active_iommu(iommu, drhd)
3432 kfree(iommu->iommu_state);
3433
3434 return -ENOMEM;
3435}
3436
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003437static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003438{
3439 struct dmar_drhd_unit *drhd;
3440 struct intel_iommu *iommu = NULL;
3441 unsigned long flag;
3442
3443 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003444 if (force_on)
3445 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3446 else
3447 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003448 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003449 }
3450
3451 for_each_active_iommu(iommu, drhd) {
3452
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003453 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003454
3455 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3456 iommu->reg + DMAR_FECTL_REG);
3457 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3458 iommu->reg + DMAR_FEDATA_REG);
3459 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3460 iommu->reg + DMAR_FEADDR_REG);
3461 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3462 iommu->reg + DMAR_FEUADDR_REG);
3463
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003464 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003465 }
3466
3467 for_each_active_iommu(iommu, drhd)
3468 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003469}
3470
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003471static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003472 .resume = iommu_resume,
3473 .suspend = iommu_suspend,
3474};
3475
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003476static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003477{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003478 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003479}
3480
3481#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003482static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003483#endif /* CONFIG_PM */
3484
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003485LIST_HEAD(dmar_rmrr_units);
3486
3487static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3488{
3489 list_add(&rmrr->list, &dmar_rmrr_units);
3490}
3491
3492
3493int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3494{
3495 struct acpi_dmar_reserved_memory *rmrr;
3496 struct dmar_rmrr_unit *rmrru;
3497
3498 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3499 if (!rmrru)
3500 return -ENOMEM;
3501
3502 rmrru->hdr = header;
3503 rmrr = (struct acpi_dmar_reserved_memory *)header;
3504 rmrru->base_address = rmrr->base_address;
3505 rmrru->end_address = rmrr->end_address;
3506
3507 dmar_register_rmrr_unit(rmrru);
3508 return 0;
3509}
3510
3511static int __init
3512rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3513{
3514 struct acpi_dmar_reserved_memory *rmrr;
3515 int ret;
3516
3517 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3518 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3519 ((void *)rmrr) + rmrr->header.length,
3520 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3521
3522 if (ret || (rmrru->devices_cnt == 0)) {
3523 list_del(&rmrru->list);
3524 kfree(rmrru);
3525 }
3526 return ret;
3527}
3528
3529static LIST_HEAD(dmar_atsr_units);
3530
3531int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3532{
3533 struct acpi_dmar_atsr *atsr;
3534 struct dmar_atsr_unit *atsru;
3535
3536 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3537 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3538 if (!atsru)
3539 return -ENOMEM;
3540
3541 atsru->hdr = hdr;
3542 atsru->include_all = atsr->flags & 0x1;
3543
3544 list_add(&atsru->list, &dmar_atsr_units);
3545
3546 return 0;
3547}
3548
3549static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3550{
3551 int rc;
3552 struct acpi_dmar_atsr *atsr;
3553
3554 if (atsru->include_all)
3555 return 0;
3556
3557 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3558 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3559 (void *)atsr + atsr->header.length,
3560 &atsru->devices_cnt, &atsru->devices,
3561 atsr->segment);
3562 if (rc || !atsru->devices_cnt) {
3563 list_del(&atsru->list);
3564 kfree(atsru);
3565 }
3566
3567 return rc;
3568}
3569
3570int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3571{
3572 int i;
3573 struct pci_bus *bus;
3574 struct acpi_dmar_atsr *atsr;
3575 struct dmar_atsr_unit *atsru;
3576
3577 dev = pci_physfn(dev);
3578
3579 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3580 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3581 if (atsr->segment == pci_domain_nr(dev->bus))
3582 goto found;
3583 }
3584
3585 return 0;
3586
3587found:
3588 for (bus = dev->bus; bus; bus = bus->parent) {
3589 struct pci_dev *bridge = bus->self;
3590
3591 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003592 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003593 return 0;
3594
Yijing Wang62f87c02012-07-24 17:20:03 +08003595 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003596 for (i = 0; i < atsru->devices_cnt; i++)
3597 if (atsru->devices[i] == bridge)
3598 return 1;
3599 break;
3600 }
3601 }
3602
3603 if (atsru->include_all)
3604 return 1;
3605
3606 return 0;
3607}
3608
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003609int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003610{
3611 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3612 struct dmar_atsr_unit *atsr, *atsr_n;
3613 int ret = 0;
3614
3615 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3616 ret = rmrr_parse_dev(rmrr);
3617 if (ret)
3618 return ret;
3619 }
3620
3621 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3622 ret = atsr_parse_dev(atsr);
3623 if (ret)
3624 return ret;
3625 }
3626
3627 return ret;
3628}
3629
Fenghua Yu99dcade2009-11-11 07:23:06 -08003630/*
3631 * Here we only respond to a device being unbound from its driver.
3632 *
3633 * A newly added device is not attached to its DMAR domain here yet; that
3634 * happens when the device is first mapped to an iova.
3635 */
3636static int device_notifier(struct notifier_block *nb,
3637 unsigned long action, void *data)
3638{
3639 struct device *dev = data;
3640 struct pci_dev *pdev = to_pci_dev(dev);
3641 struct dmar_domain *domain;
3642
David Woodhouse44cd6132009-12-02 10:18:30 +00003643 if (iommu_no_mapping(dev))
3644 return 0;
3645
Fenghua Yu99dcade2009-11-11 07:23:06 -08003646 domain = find_domain(pdev);
3647 if (!domain)
3648 return 0;
3649
Alex Williamsona97590e2011-03-04 14:52:16 -07003650 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003651 domain_remove_one_dev_info(domain, pdev);
3652
Alex Williamsona97590e2011-03-04 14:52:16 -07003653 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3654 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3655 list_empty(&domain->devices))
3656 domain_exit(domain);
3657 }
3658
Fenghua Yu99dcade2009-11-11 07:23:06 -08003659 return 0;
3660}
3661
3662static struct notifier_block device_nb = {
3663 .notifier_call = device_notifier,
3664};
3665
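/*
 * Main entry point, reached from the arch IOMMU initialization hook.
 * Rough order: parse the DMAR table, build the device scope, set up the
 * mempools, run init_dmars(), then switch dma_ops over to intel_dma_ops.
 */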
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003666int __init intel_iommu_init(void)
3667{
3668 int ret = 0;
Takao Indoh3a93c842013-04-23 17:35:03 +09003669 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003670
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003671 /* VT-d is required for a TXT/tboot launch, so enforce that */
3672 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003673
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003674 if (dmar_table_init()) {
3675 if (force_on)
3676 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003677 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003678 }
3679
Takao Indoh3a93c842013-04-23 17:35:03 +09003680 /*
3681 * Disable translation if already enabled prior to OS handover.
3682 */
3683 for_each_drhd_unit(drhd) {
3684 struct intel_iommu *iommu;
3685
3686 if (drhd->ignored)
3687 continue;
3688
3689 iommu = drhd->iommu;
3690 if (iommu->gcmd & DMA_GCMD_TE)
3691 iommu_disable_translation(iommu);
3692 }
3693
Suresh Siddhac2c72862011-08-23 17:05:19 -07003694 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003695 if (force_on)
3696 panic("tboot: Failed to initialize DMAR device scope\n");
3697 return -ENODEV;
3698 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003699
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003700 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003701 return -ENODEV;
3702
Joseph Cihula51a63e62011-03-21 11:04:24 -07003703 if (iommu_init_mempool()) {
3704 if (force_on)
3705 panic("tboot: Failed to initialize iommu memory\n");
3706 return -ENODEV;
3707 }
3708
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003709 if (list_empty(&dmar_rmrr_units))
3710 printk(KERN_INFO "DMAR: No RMRR found\n");
3711
3712 if (list_empty(&dmar_atsr_units))
3713 printk(KERN_INFO "DMAR: No ATSR found\n");
3714
Joseph Cihula51a63e62011-03-21 11:04:24 -07003715 if (dmar_init_reserved_ranges()) {
3716 if (force_on)
3717 panic("tboot: Failed to reserve iommu ranges\n");
3718 return -ENODEV;
3719 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003720
3721 init_no_remapping_devices();
3722
Joseph Cihulab7792602011-05-03 00:08:37 -07003723 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003724 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003725 if (force_on)
3726 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003727 printk(KERN_ERR "IOMMU: dmar init failed\n");
3728 put_iova_domain(&reserved_iova_list);
3729 iommu_exit_mempool();
3730 return ret;
3731 }
3732 printk(KERN_INFO
3733 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3734
mark gross5e0d2a62008-03-04 15:22:08 -08003735 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003736#ifdef CONFIG_SWIOTLB
3737 swiotlb = 0;
3738#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003739 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003740
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003741 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003742
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003743 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003744
Fenghua Yu99dcade2009-11-11 07:23:06 -08003745 bus_register_notifier(&pci_bus_type, &device_nb);
3746
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003747 intel_iommu_enabled = 1;
3748
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003749 return 0;
3750}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003751
Han, Weidong3199aa62009-02-26 17:31:12 +08003752static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3753 struct pci_dev *pdev)
3754{
3755 struct pci_dev *tmp, *parent;
3756
3757 if (!iommu || !pdev)
3758 return;
3759
3760 /* dependent device detach */
3761 tmp = pci_find_upstream_pcie_bridge(pdev);
3762 /* Secondary interface's bus number and devfn 0 */
3763 if (tmp) {
3764 parent = pdev->bus->self;
3765 while (parent != tmp) {
3766 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003767 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003768 parent = parent->bus->self;
3769 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003770 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003771 iommu_detach_dev(iommu,
3772 tmp->subordinate->number, 0);
3773 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003774 iommu_detach_dev(iommu, tmp->bus->number,
3775 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003776 }
3777}
3778
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003779static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003780 struct pci_dev *pdev)
3781{
3782 struct device_domain_info *info;
3783 struct intel_iommu *iommu;
3784 unsigned long flags;
3785 int found = 0;
3786 struct list_head *entry, *tmp;
3787
David Woodhouse276dbf992009-04-04 01:45:37 +01003788 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3789 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003790 if (!iommu)
3791 return;
3792
3793 spin_lock_irqsave(&device_domain_lock, flags);
3794 list_for_each_safe(entry, tmp, &domain->devices) {
3795 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003796 if (info->segment == pci_domain_nr(pdev->bus) &&
3797 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003798 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003799 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003800 spin_unlock_irqrestore(&device_domain_lock, flags);
3801
Yu Zhao93a23a72009-05-18 13:51:37 +08003802 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003803 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003804 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003805 free_devinfo_mem(info);
3806
3807 spin_lock_irqsave(&device_domain_lock, flags);
3808
3809 if (found)
3810 break;
3811 else
3812 continue;
3813 }
3814
3815		/* if there are no other devices under the same iommu
3816		 * owned by this domain, clear this iommu in iommu_bmp and
3817		 * update the iommu count and coherency
3818 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003819 if (iommu == device_to_iommu(info->segment, info->bus,
3820 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003821 found = 1;
3822 }
3823
Roland Dreier3e7abe22011-07-20 06:22:21 -07003824 spin_unlock_irqrestore(&device_domain_lock, flags);
3825
Weidong Hanc7151a82008-12-08 22:51:37 +08003826 if (found == 0) {
3827 unsigned long tmp_flags;
3828 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003829 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003830 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003831 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003832 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003833
Alex Williamson9b4554b2011-05-24 12:19:04 -04003834 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3835 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3836 spin_lock_irqsave(&iommu->lock, tmp_flags);
3837 clear_bit(domain->id, iommu->domain_ids);
3838 iommu->domains[domain->id] = NULL;
3839 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3840 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003841 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003842}
3843
3844static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3845{
3846 struct device_domain_info *info;
3847 struct intel_iommu *iommu;
3848 unsigned long flags1, flags2;
3849
3850 spin_lock_irqsave(&device_domain_lock, flags1);
3851 while (!list_empty(&domain->devices)) {
3852 info = list_entry(domain->devices.next,
3853 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01003854 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003855 spin_unlock_irqrestore(&device_domain_lock, flags1);
3856
Yu Zhao93a23a72009-05-18 13:51:37 +08003857 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003858 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003859 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003860 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003861
3862 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003863 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003864 */
3865 spin_lock_irqsave(&domain->iommu_lock, flags2);
3866 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003867 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003868 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003869 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003870 }
3871 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3872
3873 free_devinfo_mem(info);
3874 spin_lock_irqsave(&device_domain_lock, flags1);
3875 }
3876 spin_unlock_irqrestore(&device_domain_lock, flags1);
3877}
3878
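/*
 * The remaining functions back the generic IOMMU API (intel_iommu_ops):
 * "vm domains" carry their own id space (vm_domid), can span multiple
 * hardware units, and are used for device assignment, e.g. by KVM/VFIO.
 */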
Weidong Han5e98c4b2008-12-08 23:03:27 +08003879/* domain id for virtual machine, it won't be set in context */
3880static unsigned long vm_domid;
3881
3882static struct dmar_domain *iommu_alloc_vm_domain(void)
3883{
3884 struct dmar_domain *domain;
3885
3886 domain = alloc_domain_mem();
3887 if (!domain)
3888 return NULL;
3889
3890 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003891 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003892 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003893 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3894
3895 return domain;
3896}
3897
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003898static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003899{
3900 int adjust_width;
3901
3902 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003903 spin_lock_init(&domain->iommu_lock);
3904
3905 domain_reserve_special_ranges(domain);
3906
3907 /* calculate AGAW */
3908 domain->gaw = guest_width;
3909 adjust_width = guestwidth_to_adjustwidth(guest_width);
3910 domain->agaw = width_to_agaw(adjust_width);
3911
3912 INIT_LIST_HEAD(&domain->devices);
3913
3914 domain->iommu_count = 0;
3915 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003916 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003917 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003918 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003919 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003920
3921 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003922 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003923 if (!domain->pgd)
3924 return -ENOMEM;
3925 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3926 return 0;
3927}
3928
3929static void iommu_free_vm_domain(struct dmar_domain *domain)
3930{
3931 unsigned long flags;
3932 struct dmar_drhd_unit *drhd;
3933 struct intel_iommu *iommu;
3934 unsigned long i;
3935 unsigned long ndomains;
3936
3937 for_each_drhd_unit(drhd) {
3938 if (drhd->ignored)
3939 continue;
3940 iommu = drhd->iommu;
3941
3942 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003943 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003944 if (iommu->domains[i] == domain) {
3945 spin_lock_irqsave(&iommu->lock, flags);
3946 clear_bit(i, iommu->domain_ids);
3947 iommu->domains[i] = NULL;
3948 spin_unlock_irqrestore(&iommu->lock, flags);
3949 break;
3950 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003951 }
3952 }
3953}
3954
3955static void vm_domain_exit(struct dmar_domain *domain)
3956{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003957	/* Domain 0 is reserved, so don't process it */
3958 if (!domain)
3959 return;
3960
3961 vm_domain_remove_all_dev_info(domain);
3962 /* destroy iovas */
3963 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003964
3965 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003966 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003967
3968 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003969 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003970
3971 iommu_free_vm_domain(domain);
3972 free_domain_mem(domain);
3973}
3974
Joerg Roedel5d450802008-12-03 14:52:32 +01003975static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003976{
Joerg Roedel5d450802008-12-03 14:52:32 +01003977 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003978
Joerg Roedel5d450802008-12-03 14:52:32 +01003979 dmar_domain = iommu_alloc_vm_domain();
3980 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003981 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003982 "intel_iommu_domain_init: dmar_domain == NULL\n");
3983 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003984 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003985 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003986 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003987 "intel_iommu_domain_init() failed\n");
3988 vm_domain_exit(dmar_domain);
3989 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003990 }
Allen Kay8140a952011-10-14 12:32:17 -07003991 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003992 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003993
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003994 domain->geometry.aperture_start = 0;
3995 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3996 domain->geometry.force_aperture = true;
3997
Joerg Roedel5d450802008-12-03 14:52:32 +01003998 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003999}
Kay, Allen M38717942008-09-09 18:37:29 +03004000
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

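/*
 * Usage sketch (illustrative, error handling trimmed): a caller such as
 * VFIO or KVM device assignment reaches this through the generic API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	int err = iommu_attach_device(dom, &pdev->dev);
 *
 * Note the "knock out extra levels" loop above: when the hardware
 * supports fewer page-table levels than the domain currently uses, the
 * top level is popped off one step at a time by promoting its first
 * (and, given the max_addr check, only live) entry to be the new root.
 */
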
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

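/*
 * Worked example for the rounding above (values illustrative): a request
 * to map size 0x20 at hpa 0x1ff0 straddles a 4KiB boundary, so
 * aligned_nrpages(0x1ff0, 0x20) yields two pages even though 0x20 is far
 * below PAGE_SIZE; both 4KiB frames get mapped so the last 0x10 bytes
 * remain reachable.
 */
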
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

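/*
 * Note on the return value: dma_pte_clear_range() reports the page order
 * it cleared at, so PAGE_SIZE << order can exceed the size the caller
 * asked for when a superpage covered the range.  The generic
 * iommu_unmap() loop advances by the size returned here, so
 * over-reporting is handled there.
 */
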
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

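/*
 * Usage sketch (illustrative): translate an IOVA back to a physical
 * address, e.g. to sanity-check a mapping set up with iommu_map():
 *
 *	phys_addr_t phys = iommu_iova_to_phys(dom, iova);
 *
 * A return value of 0 indicates that no PTE is present for the address.
 */
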
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

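/*
 * Usage sketch (illustrative): callers probe a capability before relying
 * on it; for instance, a caller may only request snooped mappings when
 * the domain can honour them:
 *
 *	if (iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 */
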
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

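/*
 * Grouping example (illustrative): consider a two-function device at
 * 00:1b.0/00:1b.1 that implements none of the REQ_ACS_FLAGS.  The slot
 * walk above lands on function 0 for both functions, so they resolve to
 * the same dma_pdev and end up sharing one iommu_group: since they can
 * reach each other's DMA, they may only be assigned (e.g. to a guest)
 * as a unit.
 */
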
static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

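/*
 * These callbacks are wired up to the PCI bus elsewhere in the driver;
 * a sketch of the registration, assuming the usual path of this era:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * after which iommu_domain_alloc(&pci_bus_type) and friends dispatch to
 * the intel_iommu_* functions above.
 */
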
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

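/*
 * Decoding example (interpreting the macro names; values illustrative):
 * a GGC read of 0x0900 selects GGC_MEMORY_SIZE_2M_VT, which includes the
 * GGC_MEMORY_VT_ENABLED bit, i.e. 2M of GTT stolen memory with VT mode
 * enabled; a read of 0x0300 (GGC_MEMORY_SIZE_2M) leaves the VT bit
 * clear, which is exactly the case the quirk below refuses to trust.
 */
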
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
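
/*
 * Decoding example for vtisochctrl (illustrative): bit 0 set means
 * Azalia DMA is routed to the non-isoch DMAR unit and there is nothing
 * to check.  With bit 0 clear, (vtisochctrl & 0x1c) == 0x10 is the
 * recommended 16 TLB entries and is accepted; zero entries triggers the
 * WARN plus the Azalia identity-map workaround; any other count just
 * earns the warning printed above.
 */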