/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

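/*
 * Page-table geometry helpers. Each level of the VT-d page table decodes
 * LEVEL_STRIDE (9) bits of the DMA PFN, so an adjusted guest address width
 * (agaw) of N corresponds to an (N + 2)-level table covering 30 + 9 * N
 * bits of address space; e.g. agaw 2 gives a 4-level table and a 48-bit
 * address width.
 */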
static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
        return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root) ? phys_to_virt(
                root->val & VTD_PAGE_MASK) :
                NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
        pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device across
 * iommus may be owned in one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev;    /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

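/*
 * Deferred unmap batching: IOVAs freed by the DMA unmap path are parked in
 * the deferred_flush tables below (one table per IOMMU) and are only
 * returned to the allocator after an IOTLB flush, triggered either when a
 * table reaches HIGH_WATER_MARK entries or when unmap_timer fires (see
 * flush_unmaps_timeout() above). Booting with intel_iommu=strict disables
 * the batching and flushes on every unmap.
 */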
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable supported super page\n");
                        intel_iommu_superpage = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}

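/*
 * Pick the largest adjusted guest address width (agaw) that is both
 * supported by this IOMMU (advertised in its SAGAW capability field) and
 * no wider than max_gaw. Returns -1 if none of the supported widths fit.
 */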
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        int i;

        i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

        domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
        int i;

        domain->iommu_snooping = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
        }
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        int mask = 0xf;

        if (!intel_iommu_superpage) {
                domain->iommu_superpage = 0;
                return;
        }

        /* set iommu_superpage to the smallest common denominator */
        for_each_active_iommu(iommu, drhd) {
                mask &= cap_super_page_val(iommu->cap);
                if (!mask) {
                        break;
                }
        }
        domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);
        domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        int i;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                if (segment != drhd->segment)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++) {
                        if (drhd->devices[i] &&
                            drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
                        if (drhd->devices[i] &&
                            drhd->devices[i]->subordinate &&
                            drhd->devices[i]->subordinate->number <= bus &&
                            drhd->devices[i]->subordinate->busn_res.end >= bus)
                                return drhd->iommu;
                }

                if (drhd->include_all)
                        return drhd->iommu;
        }

        return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)
                                alloc_pgtable_page(iommu->node);
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn], \
                        sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

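/*
 * Walk the page table of @domain from the top level down to @target_level
 * for @pfn, allocating any missing intermediate page-table pages on the
 * way; the cmpxchg64() below tolerates a concurrent walker installing the
 * same intermediate entry first. A @target_level of 0 stops at the first
 * superpage or non-present entry encountered.
 */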
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int target_level)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (addr_width < BITS_PER_LONG && pfn >> addr_width)
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (level > 0) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        } else {
                                dma_pte_addr(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (pte->val & DMA_PTE_LARGE_PAGE) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte; a TLB flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
                               unsigned long start_pfn,
                               unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;
        int order;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);

        order = (large_page - 1) * 9;
        return order;
}

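/*
 * Recursively free page-table pages whose entire covered range falls
 * within [start_pfn, last_pfn]. Leaf PTEs are expected to have been
 * cleared already by dma_pte_clear_range().
 */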
static void dma_pte_free_level(struct dmar_domain *domain, int level,
                               struct dma_pte *pte, unsigned long pfn,
                               unsigned long start_pfn, unsigned long last_pfn)
{
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;
                struct dma_pte *level_pte;

                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;

                level_pfn = pfn & level_mask(level - 1);
                level_pte = phys_to_virt(dma_pte_addr(pte));

                if (level > 2)
                        dma_pte_free_level(domain, level - 1, level_pte,
                                           level_pfn, start_pfn, last_pfn);

                /* If range covers entire pagetable, free it */
                if (!(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        free_pgtable_page(level_pte);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

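/*
 * Register-based context-cache invalidation: build a DMA_CCMD_* command,
 * write it to the Context Command register and spin until the hardware
 * clears the ICC bit. When the hardware supports queued invalidation, the
 * qi_flush_context() path is used instead of this routine.
 */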
1009/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001010static void __iommu_flush_context(struct intel_iommu *iommu,
1011 u16 did, u16 source_id, u8 function_mask,
1012 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001013{
1014 u64 val = 0;
1015 unsigned long flag;
1016
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001017 switch (type) {
1018 case DMA_CCMD_GLOBAL_INVL:
1019 val = DMA_CCMD_GLOBAL_INVL;
1020 break;
1021 case DMA_CCMD_DOMAIN_INVL:
1022 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1023 break;
1024 case DMA_CCMD_DEVICE_INVL:
1025 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1026 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1027 break;
1028 default:
1029 BUG();
1030 }
1031 val |= DMA_CCMD_ICC;
1032
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001033 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001034 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1035
1036 /* Make sure hardware complete it */
1037 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1038 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1039
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001040 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001041}
1042
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably to be super secure.. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                dmar_readq, (!(val & DMA_TLB_IVT)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)
                return NULL;

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(info->dev))
                return NULL;

        info->iommu = iommu;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info)
                return;

        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !pci_ats_enabled(info->dev))
                return;

        pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages, int map)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        /*
         * Fall back to domain-selective flush if there is no PSI support or
         * the size is too big.
         * PSI requires the page size to be 2 ^ x, and the base address to be
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                                DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr, mask,
                                                DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, changes of pages from non-present to present require
         * a flush. However, the device IOTLB doesn't need to be flushed in this
         * case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

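/*
 * Clear the Enable Protected Memory bit in the PMEN register so that DMA
 * to the protected memory regions (typically set up for a measured/TXT
 * launch) is no longer blocked, and wait for the status bit to confirm it.
 */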
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                readl, !(pmen & DMA_PMEN_PRS), pmen);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
        return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
                        ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                return -ENOMEM;
        }

        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

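/*
 * Tear down the software state for one IOMMU: release every domain that
 * still references it, disable translation, free its interrupt, domain
 * arrays and context table, and free g_iommus[] once the last unit is
 * gone.
 */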
void free_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;
        unsigned long flags;

        if ((iommu->domains) && (iommu->domain_ids)) {
                for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);

                        spin_lock_irqsave(&domain->iommu_lock, flags);
                        if (--domain->iommu_count == 0) {
                                if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                        vm_domain_exit(domain);
                                else
                                        domain_exit(domain);
                        }
                        spin_unlock_irqrestore(&domain->iommu_lock, flags);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        if (iommu->irq) {
                irq_set_handler_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);
        }

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (g_iommus[i])
                        break;
        }

        if (i == g_num_of_iommus)
                kfree(g_iommus);

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        domain->nid = -1;
        memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
        domain->flags = 0;

        return domain;
}

Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001357static int iommu_attach_domain(struct dmar_domain *domain,
1358 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001359{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001360 int num;
1361 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001362 unsigned long flags;
1363
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001364 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001365
1366 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001367
1368 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1369 if (num >= ndomains) {
1370 spin_unlock_irqrestore(&iommu->lock, flags);
1371 printk(KERN_ERR "IOMMU: no free domain ids\n");
1372 return -ENOMEM;
1373 }
1374
1375 domain->id = num;
1376 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001377 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001378 iommu->domains[num] = domain;
1379 spin_unlock_irqrestore(&iommu->lock, flags);
1380
1381 return 0;
1382}
1383
1384static void iommu_detach_domain(struct dmar_domain *domain,
1385 struct intel_iommu *iommu)
1386{
1387 unsigned long flags;
1388 int num, ndomains;
1389 int found = 0;
1390
1391 spin_lock_irqsave(&iommu->lock, flags);
1392 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001393 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001394 if (iommu->domains[num] == domain) {
1395 found = 1;
1396 break;
1397 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001398 }
1399
1400 if (found) {
1401 clear_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001402 clear_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001403 iommu->domains[num] = NULL;
1404 }
Weidong Han8c11e792008-12-08 15:29:22 +08001405 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406}
1407
1408static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001409static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410
Joseph Cihula51a63e62011-03-21 11:04:24 -07001411static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412{
1413 struct pci_dev *pdev = NULL;
1414 struct iova *iova;
1415 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001416
David Millerf6611972008-02-06 01:36:23 -08001417 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418
Mark Gross8a443df2008-03-04 14:59:31 -08001419 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1420 &reserved_rbtree_key);
1421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422 /* IOAPIC ranges shouldn't be accessed by DMA */
1423 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1424 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001425 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001426 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001427 return -ENODEV;
1428 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429
1430 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1431 for_each_pci_dev(pdev) {
1432 struct resource *r;
1433
1434 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1435 r = &pdev->resource[i];
1436 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1437 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001438 iova = reserve_iova(&reserved_iova_list,
1439 IOVA_PFN(r->start),
1440 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001441 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001443 return -ENODEV;
1444 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001445 }
1446 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001447 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448}
1449
1450static void domain_reserve_special_ranges(struct dmar_domain *domain)
1451{
1452 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1453}
1454
1455static inline int guestwidth_to_adjustwidth(int gaw)
1456{
1457 int agaw;
1458 int r = (gaw - 12) % 9;
1459
1460 if (r == 0)
1461 agaw = gaw;
1462 else
1463 agaw = gaw + 9 - r;
1464 if (agaw > 64)
1465 agaw = 64;
1466 return agaw;
1467}
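/*
 * Worked example for guestwidth_to_adjustwidth() above (illustration only):
 * the adjusted width is 12 bits of page offset plus a whole number of
 * 9-bit page-table levels.  gaw = 48 gives r = (48 - 12) % 9 = 0, so
 * agaw = 48; gaw = 36 gives r = 6, so agaw = 36 + 9 - 6 = 39; any result
 * above 64 is capped at 64.
 */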
1468
1469static int domain_init(struct dmar_domain *domain, int guest_width)
1470{
1471 struct intel_iommu *iommu;
1472 int adjust_width, agaw;
1473 unsigned long sagaw;
1474
David Millerf6611972008-02-06 01:36:23 -08001475 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001476 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477
1478 domain_reserve_special_ranges(domain);
1479
1480 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001481 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001482 if (guest_width > cap_mgaw(iommu->cap))
1483 guest_width = cap_mgaw(iommu->cap);
1484 domain->gaw = guest_width;
1485 adjust_width = guestwidth_to_adjustwidth(guest_width);
1486 agaw = width_to_agaw(adjust_width);
1487 sagaw = cap_sagaw(iommu->cap);
1488 if (!test_bit(agaw, &sagaw)) {
1489 /* hardware doesn't support it, choose a bigger one */
1490 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1491 agaw = find_next_bit(&sagaw, 5, agaw);
1492 if (agaw >= 5)
1493 return -ENODEV;
1494 }
1495 domain->agaw = agaw;
1496 INIT_LIST_HEAD(&domain->devices);
1497
Weidong Han8e6040972008-12-08 15:49:06 +08001498 if (ecap_coherent(iommu->ecap))
1499 domain->iommu_coherency = 1;
1500 else
1501 domain->iommu_coherency = 0;
1502
Sheng Yang58c610b2009-03-18 15:33:05 +08001503 if (ecap_sc_support(iommu->ecap))
1504 domain->iommu_snooping = 1;
1505 else
1506 domain->iommu_snooping = 0;
1507
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001508 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001509 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001510 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001511
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001512 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001513 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514 if (!domain->pgd)
1515 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001516 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517 return 0;
1518}
1519
1520static void domain_exit(struct dmar_domain *domain)
1521{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001522 struct dmar_drhd_unit *drhd;
1523 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001524
1525	/* Domain 0 is reserved, so don't process it */
1526 if (!domain)
1527 return;
1528
Alex Williamson7b668352011-05-24 12:02:41 +01001529 /* Flush any lazy unmaps that may reference this domain */
1530 if (!intel_iommu_strict)
1531 flush_unmaps_timeout(0);
1532
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001533 domain_remove_dev_info(domain);
1534 /* destroy iovas */
1535 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001536
1537 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001538 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001539
1540 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001541 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001543 for_each_active_iommu(iommu, drhd)
Mike Travis1b198bb2012-03-05 15:05:16 -08001544 if (test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001545 iommu_detach_domain(domain, iommu);
1546
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001547 free_domain_mem(domain);
1548}
1549
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001550static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1551 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001552{
1553 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001554 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001555 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001556 struct dma_pte *pgd;
1557 unsigned long num;
1558 unsigned long ndomains;
1559 int id;
1560 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001561 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562
1563 pr_debug("Set context mapping for %02x:%02x.%d\n",
1564 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001565
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001567 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1568 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001569
David Woodhouse276dbf992009-04-04 01:45:37 +01001570 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001571 if (!iommu)
1572 return -ENODEV;
1573
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574 context = device_to_context_entry(iommu, bus, devfn);
1575 if (!context)
1576 return -ENOMEM;
1577 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001578 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579 spin_unlock_irqrestore(&iommu->lock, flags);
1580 return 0;
1581 }
1582
Weidong Hanea6606b2008-12-08 23:08:15 +08001583 id = domain->id;
1584 pgd = domain->pgd;
1585
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001586 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1587 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001588 int found = 0;
1589
1590 /* find an available domain id for this device in iommu */
1591 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001592 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001593 if (iommu->domains[num] == domain) {
1594 id = num;
1595 found = 1;
1596 break;
1597 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001598 }
1599
1600 if (found == 0) {
1601 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1602 if (num >= ndomains) {
1603 spin_unlock_irqrestore(&iommu->lock, flags);
1604 printk(KERN_ERR "IOMMU: no free domain ids\n");
1605 return -EFAULT;
1606 }
1607
1608 set_bit(num, iommu->domain_ids);
1609 iommu->domains[num] = domain;
1610 id = num;
1611 }
1612
1613		/* Skip top levels of page tables for
1614		 * an iommu which has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001615 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001616 */
Chris Wright1672af12009-12-02 12:06:34 -08001617 if (translation != CONTEXT_TT_PASS_THROUGH) {
1618 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1619 pgd = phys_to_virt(dma_pte_addr(pgd));
1620 if (!dma_pte_present(pgd)) {
1621 spin_unlock_irqrestore(&iommu->lock, flags);
1622 return -ENOMEM;
1623 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001624 }
1625 }
1626 }
1627
1628 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001629
Yu Zhao93a23a72009-05-18 13:51:37 +08001630 if (translation != CONTEXT_TT_PASS_THROUGH) {
1631 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1632 translation = info ? CONTEXT_TT_DEV_IOTLB :
1633 CONTEXT_TT_MULTI_LEVEL;
1634 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001635 /*
1636 * In pass through mode, AW must be programmed to indicate the largest
1637 * AGAW value supported by hardware. And ASR is ignored by hardware.
1638 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001639 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001640 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001641 else {
1642 context_set_address_root(context, virt_to_phys(pgd));
1643 context_set_address_width(context, iommu->agaw);
1644 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001645
1646 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001647 context_set_fault_enable(context);
1648 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001649 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001650
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001651 /*
1652 * It's a non-present to present mapping. If hardware doesn't cache
1653	 * non-present entries we only need to flush the write-buffer. If it
1654 * _does_ cache non-present entries, then it does so in the special
1655 * domain #0, which we have to flush:
1656 */
1657 if (cap_caching_mode(iommu->cap)) {
1658 iommu->flush.flush_context(iommu, 0,
1659 (((u16)bus) << 8) | devfn,
1660 DMA_CCMD_MASK_NOBIT,
1661 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001662 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001663 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001664 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001665 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001666 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001668
1669 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001670 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001671 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001672 if (domain->iommu_count == 1)
1673 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001674 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001675 }
1676 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001677 return 0;
1678}
1679
1680static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001681domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1682 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001683{
1684 int ret;
1685 struct pci_dev *tmp, *parent;
1686
David Woodhouse276dbf992009-04-04 01:45:37 +01001687 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001688 pdev->bus->number, pdev->devfn,
1689 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001690 if (ret)
1691 return ret;
1692
1693 /* dependent device mapping */
1694 tmp = pci_find_upstream_pcie_bridge(pdev);
1695 if (!tmp)
1696 return 0;
1697 /* Secondary interface's bus number and devfn 0 */
1698 parent = pdev->bus->self;
1699 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001700 ret = domain_context_mapping_one(domain,
1701 pci_domain_nr(parent->bus),
1702 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001703 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 if (ret)
1705 return ret;
1706 parent = parent->bus->self;
1707 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001708 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001709 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001710 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001711 tmp->subordinate->number, 0,
1712 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001713 else /* this is a legacy PCI bridge */
1714 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001715 pci_domain_nr(tmp->bus),
1716 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001717 tmp->devfn,
1718 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001719}
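/*
 * Illustration of domain_context_mapping() above, on a hypothetical
 * topology (bus/device numbers invented for the example): for an endpoint
 * 03:00.0 behind a conventional PCI bridge 02:01.0, which in turn sits
 * behind a PCIe-to-PCI bridge 00:1e.0 with secondary bus 2, context
 * entries are set up for 03:00.0, for 02:01.0, and finally for
 * (bus 2, devfn 0), since requests from behind the PCIe-to-PCI bridge
 * carry that source-id.
 */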
1720
Weidong Han5331fe62008-12-08 23:00:00 +08001721static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722{
1723 int ret;
1724 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001725 struct intel_iommu *iommu;
1726
David Woodhouse276dbf992009-04-04 01:45:37 +01001727 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1728 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001729 if (!iommu)
1730 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001731
David Woodhouse276dbf992009-04-04 01:45:37 +01001732 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001733 if (!ret)
1734 return ret;
1735 /* dependent device mapping */
1736 tmp = pci_find_upstream_pcie_bridge(pdev);
1737 if (!tmp)
1738 return ret;
1739 /* Secondary interface's bus number and devfn 0 */
1740 parent = pdev->bus->self;
1741 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001742 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001743 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001744 if (!ret)
1745 return ret;
1746 parent = parent->bus->self;
1747 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001748 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001749 return device_context_mapped(iommu, tmp->subordinate->number,
1750 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001751 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001752 return device_context_mapped(iommu, tmp->bus->number,
1753 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754}
1755
Fenghua Yuf5329592009-08-04 15:09:37 -07001756/* Returns a number of VTD pages, but aligned to MM page size */
1757static inline unsigned long aligned_nrpages(unsigned long host_addr,
1758 size_t size)
1759{
1760 host_addr &= ~PAGE_MASK;
1761 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1762}
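/*
 * Sketch of aligned_nrpages() above: keep only the offset of host_addr
 * within an MM page, round the end up to the MM page size, and express
 * the result in 4KiB VT-d pages.  E.g. with 4KiB MM pages, offset 0xa00
 * and size 0xc00 span 0x1600 bytes and round up to two VT-d pages.
 */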
1763
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001764/* Return largest possible superpage level for a given mapping */
1765static inline int hardware_largepage_caps(struct dmar_domain *domain,
1766 unsigned long iov_pfn,
1767 unsigned long phy_pfn,
1768 unsigned long pages)
1769{
1770 int support, level = 1;
1771 unsigned long pfnmerge;
1772
1773 support = domain->iommu_superpage;
1774
1775 /* To use a large page, the virtual *and* physical addresses
1776 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1777 of them will mean we have to use smaller pages. So just
1778 merge them and check both at once. */
1779 pfnmerge = iov_pfn | phy_pfn;
1780
1781 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1782 pages >>= VTD_STRIDE_SHIFT;
1783 if (!pages)
1784 break;
1785 pfnmerge >>= VTD_STRIDE_SHIFT;
1786 level++;
1787 support--;
1788 }
1789 return level;
1790}
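/*
 * Rough illustration of hardware_largepage_caps() above, assuming the
 * usual 9-bit stride between page-table levels: if the domain's IOMMUs
 * advertise superpage support, iov_pfn and phy_pfn are both 2MiB aligned
 * (multiples of 512 pfns) and at least 512 pages remain, level 2 is
 * returned so that __domain_mapping() below can use a 2MiB superpage;
 * with 1GiB alignment, enough remaining pages and deeper support it can
 * go one level higher still.
 */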
1791
David Woodhouse9051aa02009-06-29 12:30:54 +01001792static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1793 struct scatterlist *sg, unsigned long phys_pfn,
1794 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001795{
1796 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001797 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001798 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001799 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001800 unsigned int largepage_lvl = 0;
1801 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001802
1803 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1804
1805 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1806 return -EINVAL;
1807
1808 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1809
David Woodhouse9051aa02009-06-29 12:30:54 +01001810 if (sg)
1811 sg_res = 0;
1812 else {
1813 sg_res = nr_pages + 1;
1814 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1815 }
1816
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001817 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001818 uint64_t tmp;
1819
David Woodhousee1605492009-06-29 11:17:38 +01001820 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001821 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001822 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1823 sg->dma_length = sg->length;
1824 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001825 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001826 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001827
David Woodhousee1605492009-06-29 11:17:38 +01001828 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001829 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1830
1831 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001832 if (!pte)
1833 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001834			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001835 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001836 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001837 /* Ensure that old small page tables are removed to make room
1838 for superpage, if they exist. */
1839 dma_pte_clear_range(domain, iov_pfn,
1840 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1841 dma_pte_free_pagetable(domain, iov_pfn,
1842 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1843 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001844 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001845 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001846
David Woodhousee1605492009-06-29 11:17:38 +01001847 }
1848		/* We don't need a lock here; nobody else
1849		 * touches the iova range
1850 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001851 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001852 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001853 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001854 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1855 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001856 if (dumps) {
1857 dumps--;
1858 debug_dma_dump_mappings(NULL);
1859 }
1860 WARN_ON(1);
1861 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001862
1863 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1864
1865 BUG_ON(nr_pages < lvl_pages);
1866 BUG_ON(sg_res < lvl_pages);
1867
1868 nr_pages -= lvl_pages;
1869 iov_pfn += lvl_pages;
1870 phys_pfn += lvl_pages;
1871 pteval += lvl_pages * VTD_PAGE_SIZE;
1872 sg_res -= lvl_pages;
1873
1874 /* If the next PTE would be the first in a new page, then we
1875 need to flush the cache on the entries we've just written.
1876 And then we'll need to recalculate 'pte', so clear it and
1877 let it get set again in the if (!pte) block above.
1878
1879 If we're done (!nr_pages) we need to flush the cache too.
1880
1881 Also if we've been setting superpages, we may need to
1882 recalculate 'pte' and switch back to smaller pages for the
1883 end of the mapping, if the trailing size is not enough to
1884 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001885 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001886 if (!nr_pages || first_pte_in_page(pte) ||
1887 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001888 domain_flush_cache(domain, first_pte,
1889 (void *)pte - (void *)first_pte);
1890 pte = NULL;
1891 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001892
1893 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001894 sg = sg_next(sg);
1895 }
1896 return 0;
1897}
1898
David Woodhouse9051aa02009-06-29 12:30:54 +01001899static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1900 struct scatterlist *sg, unsigned long nr_pages,
1901 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902{
David Woodhouse9051aa02009-06-29 12:30:54 +01001903 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1904}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001905
David Woodhouse9051aa02009-06-29 12:30:54 +01001906static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1907 unsigned long phys_pfn, unsigned long nr_pages,
1908 int prot)
1909{
1910 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001911}
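/*
 * domain_sg_mapping() and domain_pfn_mapping() above are thin wrappers
 * around __domain_mapping() for the scatterlist and physically contiguous
 * cases respectively; iommu_domain_identity_map() below, for example,
 * uses domain_pfn_mapping() to set up 1:1 ranges.
 */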
1912
Weidong Hanc7151a82008-12-08 22:51:37 +08001913static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914{
Weidong Hanc7151a82008-12-08 22:51:37 +08001915 if (!iommu)
1916 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001917
1918 clear_context_table(iommu, bus, devfn);
1919 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001920 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001921 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001922}
1923
David Woodhouse109b9b02012-05-25 17:43:02 +01001924static inline void unlink_domain_info(struct device_domain_info *info)
1925{
1926 assert_spin_locked(&device_domain_lock);
1927 list_del(&info->link);
1928 list_del(&info->global);
1929 if (info->dev)
1930 info->dev->dev.archdata.iommu = NULL;
1931}
1932
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933static void domain_remove_dev_info(struct dmar_domain *domain)
1934{
1935 struct device_domain_info *info;
1936 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001937 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001938
1939 spin_lock_irqsave(&device_domain_lock, flags);
1940 while (!list_empty(&domain->devices)) {
1941 info = list_entry(domain->devices.next,
1942 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001943 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944 spin_unlock_irqrestore(&device_domain_lock, flags);
1945
Yu Zhao93a23a72009-05-18 13:51:37 +08001946 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001947 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001948 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001949 free_devinfo_mem(info);
1950
1951 spin_lock_irqsave(&device_domain_lock, flags);
1952 }
1953 spin_unlock_irqrestore(&device_domain_lock, flags);
1954}
1955
1956/*
1957 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001958 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001959 */
Kay, Allen M38717942008-09-09 18:37:29 +03001960static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001961find_domain(struct pci_dev *pdev)
1962{
1963 struct device_domain_info *info;
1964
1965 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001966 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967 if (info)
1968 return info->domain;
1969 return NULL;
1970}
1971
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001972/* domain is initialized */
1973static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1974{
1975 struct dmar_domain *domain, *found = NULL;
1976 struct intel_iommu *iommu;
1977 struct dmar_drhd_unit *drhd;
1978 struct device_domain_info *info, *tmp;
1979 struct pci_dev *dev_tmp;
1980 unsigned long flags;
1981 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001982 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001983 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984
1985 domain = find_domain(pdev);
1986 if (domain)
1987 return domain;
1988
David Woodhouse276dbf992009-04-04 01:45:37 +01001989 segment = pci_domain_nr(pdev->bus);
1990
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001991 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1992 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001993 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994 bus = dev_tmp->subordinate->number;
1995 devfn = 0;
1996 } else {
1997 bus = dev_tmp->bus->number;
1998 devfn = dev_tmp->devfn;
1999 }
2000 spin_lock_irqsave(&device_domain_lock, flags);
2001 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002002 if (info->segment == segment &&
2003 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 found = info->domain;
2005 break;
2006 }
2007 }
2008 spin_unlock_irqrestore(&device_domain_lock, flags);
2009		/* pcie-pci bridge already has a domain, use it */
2010 if (found) {
2011 domain = found;
2012 goto found_domain;
2013 }
2014 }
2015
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002016 domain = alloc_domain();
2017 if (!domain)
2018 goto error;
2019
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002020 /* Allocate new domain for the device */
2021 drhd = dmar_find_matched_drhd_unit(pdev);
2022 if (!drhd) {
2023 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2024 pci_name(pdev));
Julia Lawalld2900bd2012-07-24 16:18:14 +02002025 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002026 return NULL;
2027 }
2028 iommu = drhd->iommu;
2029
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002030 ret = iommu_attach_domain(domain, iommu);
2031 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002032 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002033 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002034 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002035
2036 if (domain_init(domain, gaw)) {
2037 domain_exit(domain);
2038 goto error;
2039 }
2040
2041 /* register pcie-to-pci device */
2042 if (dev_tmp) {
2043 info = alloc_devinfo_mem();
2044 if (!info) {
2045 domain_exit(domain);
2046 goto error;
2047 }
David Woodhouse276dbf992009-04-04 01:45:37 +01002048 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002049 info->bus = bus;
2050 info->devfn = devfn;
2051 info->dev = NULL;
2052 info->domain = domain;
2053 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002054 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002055
2056		/* pcie-to-pci bridge already has a domain, use it */
2057 found = NULL;
2058 spin_lock_irqsave(&device_domain_lock, flags);
2059 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002060 if (tmp->segment == segment &&
2061 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002062 found = tmp->domain;
2063 break;
2064 }
2065 }
2066 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002067 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002068 free_devinfo_mem(info);
2069 domain_exit(domain);
2070 domain = found;
2071 } else {
2072 list_add(&info->link, &domain->devices);
2073 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002074 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002075 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002076 }
2077
2078found_domain:
2079 info = alloc_devinfo_mem();
2080 if (!info)
2081 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002082 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002083 info->bus = pdev->bus->number;
2084 info->devfn = pdev->devfn;
2085 info->dev = pdev;
2086 info->domain = domain;
2087 spin_lock_irqsave(&device_domain_lock, flags);
2088	/* somebody else may have beaten us to it */
2089 found = find_domain(pdev);
2090 if (found != NULL) {
2091 spin_unlock_irqrestore(&device_domain_lock, flags);
2092 if (found != domain) {
2093 domain_exit(domain);
2094 domain = found;
2095 }
2096 free_devinfo_mem(info);
2097 return domain;
2098 }
2099 list_add(&info->link, &domain->devices);
2100 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002101 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002102 spin_unlock_irqrestore(&device_domain_lock, flags);
2103 return domain;
2104error:
2105 /* recheck it here, maybe others set it */
2106	/* recheck it here; somebody else may have set it in the meantime */
2107}
2108
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002109static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002110#define IDENTMAP_ALL 1
2111#define IDENTMAP_GFX 2
2112#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002113
David Woodhouseb2132032009-06-26 18:50:28 +01002114static int iommu_domain_identity_map(struct dmar_domain *domain,
2115 unsigned long long start,
2116 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002117{
David Woodhousec5395d52009-06-28 16:35:56 +01002118 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2119 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002120
David Woodhousec5395d52009-06-28 16:35:56 +01002121 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2122 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002123 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002124 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002125 }
2126
David Woodhousec5395d52009-06-28 16:35:56 +01002127 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2128 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002129 /*
2130 * RMRR range might have overlap with physical memory range,
2131 * clear it first
2132 */
David Woodhousec5395d52009-06-28 16:35:56 +01002133 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002134
David Woodhousec5395d52009-06-28 16:35:56 +01002135 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2136 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002137 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002138}
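/*
 * Example of iommu_domain_identity_map() above: the ISA/LPC workaround
 * further down calls it with start = 0 and end = 16MiB - 1, i.e.
 * first_vpfn = 0 and last_vpfn = 0xfff, so 4096 4KiB pages are reserved
 * in the iova allocator and mapped 1:1.
 */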
2139
2140static int iommu_prepare_identity_map(struct pci_dev *pdev,
2141 unsigned long long start,
2142 unsigned long long end)
2143{
2144 struct dmar_domain *domain;
2145 int ret;
2146
David Woodhousec7ab48d2009-06-26 19:10:36 +01002147 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002148 if (!domain)
2149 return -ENOMEM;
2150
David Woodhouse19943b02009-08-04 16:19:20 +01002151 /* For _hardware_ passthrough, don't bother. But for software
2152 passthrough, we do it anyway -- it may indicate a memory
2153	   range which is reserved in E820 and so didn't get set
2154 up to start with in si_domain */
2155 if (domain == si_domain && hw_pass_through) {
2156 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2157 pci_name(pdev), start, end);
2158 return 0;
2159 }
2160
2161 printk(KERN_INFO
2162 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2163 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002164
David Woodhouse5595b522009-12-02 09:21:55 +00002165 if (end < start) {
2166 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2167 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2168 dmi_get_system_info(DMI_BIOS_VENDOR),
2169 dmi_get_system_info(DMI_BIOS_VERSION),
2170 dmi_get_system_info(DMI_PRODUCT_VERSION));
2171 ret = -EIO;
2172 goto error;
2173 }
2174
David Woodhouse2ff729f2009-08-26 14:25:41 +01002175 if (end >> agaw_to_width(domain->agaw)) {
2176 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2177 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2178 agaw_to_width(domain->agaw),
2179 dmi_get_system_info(DMI_BIOS_VENDOR),
2180 dmi_get_system_info(DMI_BIOS_VERSION),
2181 dmi_get_system_info(DMI_PRODUCT_VERSION));
2182 ret = -EIO;
2183 goto error;
2184 }
David Woodhouse19943b02009-08-04 16:19:20 +01002185
David Woodhouseb2132032009-06-26 18:50:28 +01002186 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002187 if (ret)
2188 goto error;
2189
2190 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002191 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002192 if (ret)
2193 goto error;
2194
2195 return 0;
2196
2197 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002198 domain_exit(domain);
2199 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002200}
2201
2202static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2203 struct pci_dev *pdev)
2204{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002205 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002206 return 0;
2207 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002208 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002209}
2210
Suresh Siddhad3f13812011-08-23 17:05:25 -07002211#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002212static inline void iommu_prepare_isa(void)
2213{
2214 struct pci_dev *pdev;
2215 int ret;
2216
2217 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2218 if (!pdev)
2219 return;
2220
David Woodhousec7ab48d2009-06-26 19:10:36 +01002221 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002222 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002223
2224 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002225 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2226 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002227
2228}
2229#else
2230static inline void iommu_prepare_isa(void)
2231{
2232 return;
2233}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002234#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002235
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002236static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002237
Matt Kraai071e1372009-08-23 22:30:22 -07002238static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002239{
2240 struct dmar_drhd_unit *drhd;
2241 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002242 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002243
2244 si_domain = alloc_domain();
2245 if (!si_domain)
2246 return -EFAULT;
2247
David Woodhousec7ab48d2009-06-26 19:10:36 +01002248 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002249
2250 for_each_active_iommu(iommu, drhd) {
2251 ret = iommu_attach_domain(si_domain, iommu);
2252 if (ret) {
2253 domain_exit(si_domain);
2254 return -EFAULT;
2255 }
2256 }
2257
2258 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2259 domain_exit(si_domain);
2260 return -EFAULT;
2261 }
2262
2263 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2264
David Woodhouse19943b02009-08-04 16:19:20 +01002265 if (hw)
2266 return 0;
2267
David Woodhousec7ab48d2009-06-26 19:10:36 +01002268 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002269 unsigned long start_pfn, end_pfn;
2270 int i;
2271
2272 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2273 ret = iommu_domain_identity_map(si_domain,
2274 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2275 if (ret)
2276 return ret;
2277 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002278 }
2279
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002280 return 0;
2281}
2282
2283static void domain_remove_one_dev_info(struct dmar_domain *domain,
2284 struct pci_dev *pdev);
2285static int identity_mapping(struct pci_dev *pdev)
2286{
2287 struct device_domain_info *info;
2288
2289 if (likely(!iommu_identity_mapping))
2290 return 0;
2291
Mike Traviscb452a42011-05-28 13:15:03 -05002292 info = pdev->dev.archdata.iommu;
2293 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2294 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002295
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002296 return 0;
2297}
2298
2299static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002300 struct pci_dev *pdev,
2301 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002302{
2303 struct device_domain_info *info;
2304 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002305 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002306
2307 info = alloc_devinfo_mem();
2308 if (!info)
2309 return -ENOMEM;
2310
2311 info->segment = pci_domain_nr(pdev->bus);
2312 info->bus = pdev->bus->number;
2313 info->devfn = pdev->devfn;
2314 info->dev = pdev;
2315 info->domain = domain;
2316
2317 spin_lock_irqsave(&device_domain_lock, flags);
2318 list_add(&info->link, &domain->devices);
2319 list_add(&info->global, &device_domain_list);
2320 pdev->dev.archdata.iommu = info;
2321 spin_unlock_irqrestore(&device_domain_lock, flags);
2322
David Woodhousee2ad23d2012-05-25 17:42:54 +01002323 ret = domain_context_mapping(domain, pdev, translation);
2324 if (ret) {
2325 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002326 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002327 spin_unlock_irqrestore(&device_domain_lock, flags);
2328 free_devinfo_mem(info);
2329 return ret;
2330 }
2331
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002332 return 0;
2333}
2334
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002335static bool device_has_rmrr(struct pci_dev *dev)
2336{
2337 struct dmar_rmrr_unit *rmrr;
2338 int i;
2339
2340 for_each_rmrr_units(rmrr) {
2341 for (i = 0; i < rmrr->devices_cnt; i++) {
2342 /*
2343 * Return TRUE if this RMRR contains the device that
2344 * is passed in.
2345 */
2346 if (rmrr->devices[i] == dev)
2347 return true;
2348 }
2349 }
2350 return false;
2351}
2352
David Woodhouse6941af22009-07-04 18:24:27 +01002353static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2354{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002355
2356 /*
2357 * We want to prevent any device associated with an RMRR from
2358 * getting placed into the SI Domain. This is done because
2359 * problems exist when devices are moved in and out of domains
2360 * and their respective RMRR info is lost. We exempt USB devices
2361 * from this process due to their usage of RMRRs that are known
2362 * to not be needed after BIOS hand-off to OS.
2363 */
2364 if (device_has_rmrr(pdev) &&
2365 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2366 return 0;
2367
David Woodhousee0fc7e02009-09-30 09:12:17 -07002368 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2369 return 1;
2370
2371 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2372 return 1;
2373
2374 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2375 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002376
David Woodhouse3dfc8132009-07-04 19:11:08 +01002377 /*
2378 * We want to start off with all devices in the 1:1 domain, and
2379 * take them out later if we find they can't access all of memory.
2380 *
2381 * However, we can't do this for PCI devices behind bridges,
2382 * because all PCI devices behind the same bridge will end up
2383 * with the same source-id on their transactions.
2384 *
2385 * Practically speaking, we can't change things around for these
2386 * devices at run-time, because we can't be sure there'll be no
2387 * DMA transactions in flight for any of their siblings.
2388 *
2389 * So PCI devices (unless they're on the root bus) as well as
2390 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2391 * the 1:1 domain, just in _case_ one of their siblings turns out
2392 * not to be able to map all of memory.
2393 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002394 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002395 if (!pci_is_root_bus(pdev->bus))
2396 return 0;
2397 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2398 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002399 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002400 return 0;
2401
2402 /*
2403 * At boot time, we don't yet know if devices will be 64-bit capable.
2404 * Assume that they will -- if they turn out not to be, then we can
2405 * take them out of the 1:1 domain later.
2406 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002407 if (!startup) {
2408 /*
2409 * If the device's dma_mask is less than the system's memory
2410 * size then this is not a candidate for identity mapping.
2411 */
2412 u64 dma_mask = pdev->dma_mask;
2413
2414 if (pdev->dev.coherent_dma_mask &&
2415 pdev->dev.coherent_dma_mask < dma_mask)
2416 dma_mask = pdev->dev.coherent_dma_mask;
2417
2418 return dma_mask >= dma_get_required_mask(&pdev->dev);
2419 }
David Woodhouse6941af22009-07-04 18:24:27 +01002420
2421 return 1;
2422}
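/*
 * Summary of the policy implemented above (no new logic): devices covered
 * by an RMRR are never identity-mapped unless they are USB; Azalia and
 * graphics devices qualify when IDENTMAP_AZALIA/IDENTMAP_GFX are set,
 * everything else only with IDENTMAP_ALL; conventional PCI devices behind
 * a bridge, and PCIe-to-PCI bridges themselves, are left out because
 * siblings behind a bridge share a source-id; and once boot is over a
 * device must also have a DMA mask that covers all memory it may need
 * to reach.
 */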
2423
Matt Kraai071e1372009-08-23 22:30:22 -07002424static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002425{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002426 struct pci_dev *pdev = NULL;
2427 int ret;
2428
David Woodhouse19943b02009-08-04 16:19:20 +01002429 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002430 if (ret)
2431 return -EFAULT;
2432
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002433 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002434 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002435 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002436 hw ? CONTEXT_TT_PASS_THROUGH :
2437 CONTEXT_TT_MULTI_LEVEL);
2438 if (ret) {
2439 /* device not associated with an iommu */
2440 if (ret == -ENODEV)
2441 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002442 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002443 }
2444 pr_info("IOMMU: %s identity mapping for device %s\n",
2445 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002446 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002447 }
2448
2449 return 0;
2450}
2451
Joseph Cihulab7792602011-05-03 00:08:37 -07002452static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002453{
2454 struct dmar_drhd_unit *drhd;
2455 struct dmar_rmrr_unit *rmrr;
2456 struct pci_dev *pdev;
2457 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002458 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002459
2460 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002461 * for each drhd
2462 * allocate root
2463 * initialize and program root entry to not present
2464 * endfor
2465 */
2466 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002467 /*
2468		 * lock not needed as this is only incremented in the single-
2469		 * threaded kernel __init code path; all other accesses are
2470		 * read only
2471 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002472 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2473 g_num_of_iommus++;
2474 continue;
2475 }
2476 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2477 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002478 }
2479
Weidong Hand9630fe2008-12-08 11:06:32 +08002480 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2481 GFP_KERNEL);
2482 if (!g_iommus) {
2483 printk(KERN_ERR "Allocating global iommu array failed\n");
2484 ret = -ENOMEM;
2485 goto error;
2486 }
2487
mark gross80b20dd2008-04-18 13:53:58 -07002488 deferred_flush = kzalloc(g_num_of_iommus *
2489 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2490 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002491 ret = -ENOMEM;
2492 goto error;
2493 }
2494
mark gross5e0d2a62008-03-04 15:22:08 -08002495 for_each_drhd_unit(drhd) {
2496 if (drhd->ignored)
2497 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002498
2499 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002500 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002501
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002502 ret = iommu_init_domains(iommu);
2503 if (ret)
2504 goto error;
2505
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002506 /*
2507 * TBD:
2508 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002509		 * among all IOMMUs. Need to split this later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002510 */
2511 ret = iommu_alloc_root_entry(iommu);
2512 if (ret) {
2513 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2514 goto error;
2515 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002516 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002517 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002518 }
2519
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002520 /*
2521 * Start from the sane iommu hardware state.
2522 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002523 for_each_drhd_unit(drhd) {
2524 if (drhd->ignored)
2525 continue;
2526
2527 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002528
2529 /*
2530 * If the queued invalidation is already initialized by us
2531 * (for example, while enabling interrupt-remapping) then
2532 * we got the things already rolling from a sane state.
2533 */
2534 if (iommu->qi)
2535 continue;
2536
2537 /*
2538 * Clear any previous faults.
2539 */
2540 dmar_fault(-1, iommu);
2541 /*
2542 * Disable queued invalidation if supported and already enabled
2543 * before OS handover.
2544 */
2545 dmar_disable_qi(iommu);
2546 }
2547
2548 for_each_drhd_unit(drhd) {
2549 if (drhd->ignored)
2550 continue;
2551
2552 iommu = drhd->iommu;
2553
Youquan Songa77b67d2008-10-16 16:31:56 -07002554 if (dmar_enable_qi(iommu)) {
2555 /*
2556 * Queued Invalidate not enabled, use Register Based
2557 * Invalidate
2558 */
2559 iommu->flush.flush_context = __iommu_flush_context;
2560 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002561 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002562 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002563 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002564 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002565 } else {
2566 iommu->flush.flush_context = qi_flush_context;
2567 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002568 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002569 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002570 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002571 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002572 }
2573 }
2574
David Woodhouse19943b02009-08-04 16:19:20 +01002575 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002576 iommu_identity_mapping |= IDENTMAP_ALL;
2577
Suresh Siddhad3f13812011-08-23 17:05:25 -07002578#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002579 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002580#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002581
2582 check_tylersburg_isoch();
2583
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002584 /*
2585 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002586 * identity mappings for rmrr, gfx, and isa and may fall back to static
2587 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002588 */
David Woodhouse19943b02009-08-04 16:19:20 +01002589 if (iommu_identity_mapping) {
2590 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2591 if (ret) {
2592 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2593 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002594 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002595 }
David Woodhouse19943b02009-08-04 16:19:20 +01002596 /*
2597 * For each rmrr
2598 * for each dev attached to rmrr
2599 * do
2600 * locate drhd for dev, alloc domain for dev
2601 * allocate free domain
2602 * allocate page table entries for rmrr
2603 * if context not allocated for bus
2604 * allocate and init context
2605 * set present in root table for this bus
2606 * init context with domain, translation etc
2607 * endfor
2608 * endfor
2609 */
2610 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2611 for_each_rmrr_units(rmrr) {
2612 for (i = 0; i < rmrr->devices_cnt; i++) {
2613 pdev = rmrr->devices[i];
2614 /*
2615			 * some BIOSes list non-existent devices in the
2616			 * DMAR table.
2617 */
2618 if (!pdev)
2619 continue;
2620 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2621 if (ret)
2622 printk(KERN_ERR
2623 "IOMMU: mapping reserved region failed\n");
2624 }
2625 }
2626
2627 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002628
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002629 /*
2630 * for each drhd
2631 * enable fault log
2632 * global invalidate context cache
2633 * global invalidate iotlb
2634 * enable translation
2635 */
2636 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002637 if (drhd->ignored) {
2638 /*
2639 * we always have to disable PMRs or DMA may fail on
2640 * this device
2641 */
2642 if (force_on)
2643 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002645 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002647
2648 iommu_flush_write_buffer(iommu);
2649
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002650 ret = dmar_set_interrupt(iommu);
2651 if (ret)
2652 goto error;
2653
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002654 iommu_set_root_entry(iommu);
2655
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002656 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002657 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002658
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002659 ret = iommu_enable_translation(iommu);
2660 if (ret)
2661 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002662
2663 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002664 }
2665
2666 return 0;
2667error:
2668 for_each_drhd_unit(drhd) {
2669 if (drhd->ignored)
2670 continue;
2671 iommu = drhd->iommu;
2672 free_iommu(iommu);
2673 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002674 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002675 return ret;
2676}
2677
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002678/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002679static struct iova *intel_alloc_iova(struct device *dev,
2680 struct dmar_domain *domain,
2681 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002682{
2683 struct pci_dev *pdev = to_pci_dev(dev);
2684 struct iova *iova = NULL;
2685
David Woodhouse875764d2009-06-28 21:20:51 +01002686 /* Restrict dma_mask to the width that the iommu can handle */
2687 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2688
2689 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002690 /*
2691 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002692 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002693 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002694 */
David Woodhouse875764d2009-06-28 21:20:51 +01002695 iova = alloc_iova(&domain->iovad, nrpages,
2696 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2697 if (iova)
2698 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002699 }
David Woodhouse875764d2009-06-28 21:20:51 +01002700 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2701 if (unlikely(!iova)) {
 2702		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
 2703		       nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002704 return NULL;
2705 }
2706
2707 return iova;
2708}
2709
David Woodhouse147202a2009-07-07 19:43:20 +01002710static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002711{
2712 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002713 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002714
2715 domain = get_domain_for_dev(pdev,
2716 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2717 if (!domain) {
2718 printk(KERN_ERR
2719 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002720 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002721 }
2722
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002723 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002724 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002725 ret = domain_context_mapping(domain, pdev,
2726 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 if (ret) {
2728 printk(KERN_ERR
2729 "Domain context map for %s failed",
2730 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002731 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002732 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002733 }
2734
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002735 return domain;
2736}
2737
David Woodhouse147202a2009-07-07 19:43:20 +01002738static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2739{
2740 struct device_domain_info *info;
2741
2742 /* No lock here, assumes no domain exit in normal case */
2743 info = dev->dev.archdata.iommu;
2744 if (likely(info))
2745 return info->domain;
2746
2747 return __get_valid_domain_for_dev(dev);
2748}
2749
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002750static int iommu_dummy(struct pci_dev *pdev)
2751{
2752 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2753}
2754
 2755/* Check if the pdev needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002756static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002757{
David Woodhouse73676832009-07-04 14:08:36 +01002758 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759 int found;
2760
David Woodhouse73676832009-07-04 14:08:36 +01002761 if (unlikely(dev->bus != &pci_bus_type))
2762 return 1;
2763
2764 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002765 if (iommu_dummy(pdev))
2766 return 1;
2767
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002769 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002770
2771 found = identity_mapping(pdev);
2772 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002773 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002774 return 1;
2775 else {
2776 /*
 2777			 * A 32-bit DMA device is removed from si_domain and
 2778			 * falls back to non-identity mapping.
2779 */
2780 domain_remove_one_dev_info(si_domain, pdev);
2781 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2782 pci_name(pdev));
2783 return 0;
2784 }
2785 } else {
2786 /*
 2787		 * A 64-bit DMA device that was detached from a VM is put
 2788		 * back into si_domain for identity mapping.
2789 */
David Woodhouse6941af22009-07-04 18:24:27 +01002790 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002791 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002792 ret = domain_add_dev_info(si_domain, pdev,
2793 hw_pass_through ?
2794 CONTEXT_TT_PASS_THROUGH :
2795 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002796 if (!ret) {
2797 printk(KERN_INFO "64bit %s uses identity mapping\n",
2798 pci_name(pdev));
2799 return 1;
2800 }
2801 }
2802 }
2803
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002804 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002805}
2806
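/*
 * Map a single physically contiguous buffer for DMA: look up (or create) the
 * device's domain, allocate an IOVA range large enough for the request, wire
 * up the page-table entries with the appropriate read/write permissions, and
 * flush the IOTLB (in caching mode) or the write buffer before returning the
 * bus address.
 */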
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002807static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2808 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002809{
2810 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002811 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002812 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002813 struct iova *iova;
2814 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002815 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002816 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002817 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002818
2819 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002820
David Woodhouse73676832009-07-04 14:08:36 +01002821 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002822 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002823
2824 domain = get_valid_domain_for_dev(pdev);
2825 if (!domain)
2826 return 0;
2827
Weidong Han8c11e792008-12-08 15:29:22 +08002828 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002829 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002830
Mike Travisc681d0b2011-05-28 13:15:05 -05002831 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002832 if (!iova)
2833 goto error;
2834
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002835 /*
2836 * Check if DMAR supports zero-length reads on write only
2837 * mappings..
2838 */
2839 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002840 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002841 prot |= DMA_PTE_READ;
2842 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2843 prot |= DMA_PTE_WRITE;
2844 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002845	 * paddr to (paddr + size) might span a partial page, so we should map
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002846	 * the whole page. Note: if two parts of one page are mapped separately,
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002847	 * we might have two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002848	 * this is not a big problem.
2849 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002850 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002851 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002852 if (ret)
2853 goto error;
2854
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002855 /* it's a non-present to present mapping. Only flush if caching mode */
2856 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002857 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002858 else
Weidong Han8c11e792008-12-08 15:29:22 +08002859 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002860
David Woodhouse03d6a242009-06-28 15:33:46 +01002861 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2862 start_paddr += paddr & ~PAGE_MASK;
2863 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002864
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002865error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002866 if (iova)
2867 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002868	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002869 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870 return 0;
2871}
2872
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002873static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2874 unsigned long offset, size_t size,
2875 enum dma_data_direction dir,
2876 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002877{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002878 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2879 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002880}
2881
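/*
 * Drain the deferred-unmap queues: for each IOMMU with pending entries, do a
 * single global IOTLB flush on real hardware (plus a device-IOTLB flush per
 * queued IOVA), or page-selective flushes per IOVA in caching mode where
 * global flushes make emulation expensive, then free the queued IOVAs.
 * Callers hold async_umap_flush_lock.
 */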
mark gross5e0d2a62008-03-04 15:22:08 -08002882static void flush_unmaps(void)
2883{
mark gross80b20dd2008-04-18 13:53:58 -07002884 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002885
mark gross5e0d2a62008-03-04 15:22:08 -08002886 timer_on = 0;
2887
2888 /* just flush them all */
2889 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002890 struct intel_iommu *iommu = g_iommus[i];
2891 if (!iommu)
2892 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002893
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002894 if (!deferred_flush[i].next)
2895 continue;
2896
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002897		/* In caching mode, global flushes make emulation expensive */
2898 if (!cap_caching_mode(iommu->cap))
2899 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002900 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002901 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002902 unsigned long mask;
2903 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002904 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002905
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002906 /* On real hardware multiple invalidations are expensive */
2907 if (cap_caching_mode(iommu->cap))
2908 iommu_flush_iotlb_psi(iommu, domain->id,
2909 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2910 else {
2911 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2912 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2913 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2914 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002915 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002916 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002917 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002918 }
2919
mark gross5e0d2a62008-03-04 15:22:08 -08002920 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002921}
2922
2923static void flush_unmaps_timeout(unsigned long data)
2924{
mark gross80b20dd2008-04-18 13:53:58 -07002925 unsigned long flags;
2926
2927 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002928 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002929 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002930}
2931
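/*
 * Defer freeing of an IOVA: queue it on the owning IOMMU's deferred_flush
 * list instead of flushing the IOTLB right away.  The queue is drained by
 * the unmap timer, or synchronously once HIGH_WATER_MARK entries pile up.
 */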
2932static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2933{
2934 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002935 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002936 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002937
2938 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002939 if (list_size == HIGH_WATER_MARK)
2940 flush_unmaps();
2941
Weidong Han8c11e792008-12-08 15:29:22 +08002942 iommu = domain_get_iommu(dom);
2943 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002944
mark gross80b20dd2008-04-18 13:53:58 -07002945 next = deferred_flush[iommu_id].next;
2946 deferred_flush[iommu_id].domain[next] = dom;
2947 deferred_flush[iommu_id].iova[next] = iova;
2948 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002949
2950 if (!timer_on) {
2951 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2952 timer_on = 1;
2953 }
2954 list_size++;
2955 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2956}
2957
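/*
 * Undo a mapping made by intel_map_page()/__intel_map_single(): clear the
 * PTEs and free the page tables covering the IOVA range, then either flush
 * the IOTLB and free the IOVA immediately (intel_iommu_strict) or hand the
 * IOVA to the batched, timer-driven release path.
 */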
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002958static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2959 size_t size, enum dma_data_direction dir,
2960 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002961{
2962 struct pci_dev *pdev = to_pci_dev(dev);
2963 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002964 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002965 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002966 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967
David Woodhouse73676832009-07-04 14:08:36 +01002968 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002969 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002970
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002972 BUG_ON(!domain);
2973
Weidong Han8c11e792008-12-08 15:29:22 +08002974 iommu = domain_get_iommu(domain);
2975
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002976 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002977 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2978 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002979 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002980
David Woodhoused794dc92009-06-28 00:27:49 +01002981 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2982 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002983
David Woodhoused794dc92009-06-28 00:27:49 +01002984 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2985 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002986
2987 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002988 dma_pte_clear_range(domain, start_pfn, last_pfn);
2989
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002990 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002991 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2992
mark gross5e0d2a62008-03-04 15:22:08 -08002993 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002994 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002995 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002996 /* free iova */
2997 __free_iova(&domain->iovad, iova);
2998 } else {
2999 add_unmap(domain, iova);
3000 /*
 3001		 * queue up the release of the unmap to save the roughly 1/6th of
 3002		 * the cpu time used up by the iotlb flush operation...
3003 */
mark gross5e0d2a62008-03-04 15:22:08 -08003004 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003005}
3006
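/*
 * dma_alloc_coherent() backend: allocate zeroed pages (restricting the GFP
 * zone only when the device bypasses translation and has a narrow coherent
 * DMA mask) and map them DMA_BIDIRECTIONAL via __intel_map_single().
 */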
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003007static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003008 dma_addr_t *dma_handle, gfp_t flags,
3009 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003010{
3011 void *vaddr;
3012 int order;
3013
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003014 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003015 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003016
3017 if (!iommu_no_mapping(hwdev))
3018 flags &= ~(GFP_DMA | GFP_DMA32);
3019 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3020 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3021 flags |= GFP_DMA;
3022 else
3023 flags |= GFP_DMA32;
3024 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003025
3026 vaddr = (void *)__get_free_pages(flags, order);
3027 if (!vaddr)
3028 return NULL;
3029 memset(vaddr, 0, size);
3030
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003031 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3032 DMA_BIDIRECTIONAL,
3033 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003034 if (*dma_handle)
3035 return vaddr;
3036 free_pages((unsigned long)vaddr, order);
3037 return NULL;
3038}
3039
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003040static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003041 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003042{
3043 int order;
3044
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003045 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003046 order = get_order(size);
3047
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003048 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003049 free_pages((unsigned long)vaddr, order);
3050}
3051
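/*
 * Scatterlist unmap: the whole list was mapped into one contiguous IOVA
 * range, so it can be looked up from the first entry's dma_address and
 * released the same way intel_unmap_page() releases a single mapping.
 */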
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003052static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3053 int nelems, enum dma_data_direction dir,
3054 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003055{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003056 struct pci_dev *pdev = to_pci_dev(hwdev);
3057 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003058 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003059 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003060 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061
David Woodhouse73676832009-07-04 14:08:36 +01003062 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003063 return;
3064
3065 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003066 BUG_ON(!domain);
3067
3068 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003070 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003071 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3072 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003073 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003074
David Woodhoused794dc92009-06-28 00:27:49 +01003075 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3076 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077
3078 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003079 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003080
David Woodhoused794dc92009-06-28 00:27:49 +01003081 /* free page tables */
3082 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3083
David Woodhouseacea0012009-07-14 01:55:11 +01003084 if (intel_iommu_strict) {
3085 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003086 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003087 /* free iova */
3088 __free_iova(&domain->iovad, iova);
3089 } else {
3090 add_unmap(domain, iova);
3091 /*
 3092		 * queue up the release of the unmap to save the roughly 1/6th of
 3093		 * the cpu time used up by the iotlb flush operation...
3094 */
3095 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096}
3097
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003098static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003099 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003100{
3101 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003102 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003103
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003104 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003105 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003106 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003107 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003108 }
3109 return nelems;
3110}
3111
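/*
 * Scatterlist map: sum the page-aligned size of every segment, allocate one
 * IOVA range covering the whole list, and map it with a single
 * domain_sg_mapping() call; on failure the partial mapping, page tables and
 * IOVA are torn down and 0 is returned as the DMA API expects.
 */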
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003112static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3113 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003114{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003115 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003116 struct pci_dev *pdev = to_pci_dev(hwdev);
3117 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003118 size_t size = 0;
3119 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003120 struct iova *iova = NULL;
3121 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003122 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003123 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003124 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003125
3126 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003127 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003128 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003129
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003130 domain = get_valid_domain_for_dev(pdev);
3131 if (!domain)
3132 return 0;
3133
Weidong Han8c11e792008-12-08 15:29:22 +08003134 iommu = domain_get_iommu(domain);
3135
David Woodhouseb536d242009-06-28 14:49:31 +01003136 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003137 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003138
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003139 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3140 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003141 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003142 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003143 return 0;
3144 }
3145
3146 /*
3147 * Check if DMAR supports zero-length reads on write only
3148 * mappings..
3149 */
3150 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003151 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003152 prot |= DMA_PTE_READ;
3153 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3154 prot |= DMA_PTE_WRITE;
3155
David Woodhouseb536d242009-06-28 14:49:31 +01003156 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003157
Fenghua Yuf5329592009-08-04 15:09:37 -07003158 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003159 if (unlikely(ret)) {
3160 /* clear the page */
3161 dma_pte_clear_range(domain, start_vpfn,
3162 start_vpfn + size - 1);
3163 /* free page tables */
3164 dma_pte_free_pagetable(domain, start_vpfn,
3165 start_vpfn + size - 1);
3166 /* free iova */
3167 __free_iova(&domain->iovad, iova);
3168 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003169 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003170
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003171 /* it's a non-present to present mapping. Only flush if caching mode */
3172 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003173 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003174 else
Weidong Han8c11e792008-12-08 15:29:22 +08003175 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003176
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003177 return nelems;
3178}
3179
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003180static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3181{
3182 return !dma_addr;
3183}
3184
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003185struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003186 .alloc = intel_alloc_coherent,
3187 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188 .map_sg = intel_map_sg,
3189 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003190 .map_page = intel_map_page,
3191 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003192 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003193};
3194
3195static inline int iommu_domain_cache_init(void)
3196{
3197 int ret = 0;
3198
3199 iommu_domain_cache = kmem_cache_create("iommu_domain",
3200 sizeof(struct dmar_domain),
3201 0,
3202 SLAB_HWCACHE_ALIGN,
 3204					NULL);
3205 if (!iommu_domain_cache) {
3206 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3207 ret = -ENOMEM;
3208 }
3209
3210 return ret;
3211}
3212
3213static inline int iommu_devinfo_cache_init(void)
3214{
3215 int ret = 0;
3216
3217 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3218 sizeof(struct device_domain_info),
3219 0,
3220 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003221 NULL);
3222 if (!iommu_devinfo_cache) {
3223 printk(KERN_ERR "Couldn't create devinfo cache\n");
3224 ret = -ENOMEM;
3225 }
3226
3227 return ret;
3228}
3229
3230static inline int iommu_iova_cache_init(void)
3231{
3232 int ret = 0;
3233
3234 iommu_iova_cache = kmem_cache_create("iommu_iova",
3235 sizeof(struct iova),
3236 0,
3237 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003238 NULL);
3239 if (!iommu_iova_cache) {
3240 printk(KERN_ERR "Couldn't create iova cache\n");
3241 ret = -ENOMEM;
3242 }
3243
3244 return ret;
3245}
3246
3247static int __init iommu_init_mempool(void)
3248{
3249 int ret;
3250 ret = iommu_iova_cache_init();
3251 if (ret)
3252 return ret;
3253
3254 ret = iommu_domain_cache_init();
3255 if (ret)
3256 goto domain_error;
3257
3258 ret = iommu_devinfo_cache_init();
3259 if (!ret)
3260 return ret;
3261
3262 kmem_cache_destroy(iommu_domain_cache);
3263domain_error:
3264 kmem_cache_destroy(iommu_iova_cache);
3265
3266 return -ENOMEM;
3267}
3268
3269static void __init iommu_exit_mempool(void)
3270{
3271 kmem_cache_destroy(iommu_devinfo_cache);
3272 kmem_cache_destroy(iommu_domain_cache);
3273 kmem_cache_destroy(iommu_iova_cache);
3274
3275}
3276
Dan Williams556ab452010-07-23 15:47:56 -07003277static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3278{
3279 struct dmar_drhd_unit *drhd;
3280 u32 vtbar;
3281 int rc;
3282
3283 /* We know that this device on this chipset has its own IOMMU.
3284 * If we find it under a different IOMMU, then the BIOS is lying
3285 * to us. Hope that the IOMMU for this device is actually
3286 * disabled, and it needs no translation...
3287 */
3288 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3289 if (rc) {
3290 /* "can't" happen */
3291 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3292 return;
3293 }
3294 vtbar &= 0xffff0000;
3295
 3296	/* we know that this iommu should be at offset 0xa000 from vtbar */
3297 drhd = dmar_find_matched_drhd_unit(pdev);
3298 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3299 TAINT_FIRMWARE_WORKAROUND,
3300 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3301 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3302}
3303DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3304
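/*
 * Decide which DMAR units can be ignored: units whose device scope contains
 * no PCI devices at all, and, unless dmar_map_gfx is set, units that cover
 * only graphics devices, whose devices are then tagged with
 * DUMMY_DEVICE_DOMAIN_INFO so they bypass translation entirely.
 */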
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003305static void __init init_no_remapping_devices(void)
3306{
3307 struct dmar_drhd_unit *drhd;
3308
3309 for_each_drhd_unit(drhd) {
3310 if (!drhd->include_all) {
3311 int i;
3312 for (i = 0; i < drhd->devices_cnt; i++)
3313 if (drhd->devices[i] != NULL)
3314 break;
3315 /* ignore DMAR unit if no pci devices exist */
3316 if (i == drhd->devices_cnt)
3317 drhd->ignored = 1;
3318 }
3319 }
3320
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003321 for_each_drhd_unit(drhd) {
3322 int i;
3323 if (drhd->ignored || drhd->include_all)
3324 continue;
3325
3326 for (i = 0; i < drhd->devices_cnt; i++)
3327 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003328 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003329 break;
3330
3331 if (i < drhd->devices_cnt)
3332 continue;
3333
David Woodhousec0771df2011-10-14 20:59:46 +01003334 /* This IOMMU has *only* gfx devices. Either bypass it or
3335 set the gfx_mapped flag, as appropriate */
3336 if (dmar_map_gfx) {
3337 intel_iommu_gfx_mapped = 1;
3338 } else {
3339 drhd->ignored = 1;
3340 for (i = 0; i < drhd->devices_cnt; i++) {
3341 if (!drhd->devices[i])
3342 continue;
3343 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3344 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003345 }
3346 }
3347}
3348
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003349#ifdef CONFIG_SUSPEND
3350static int init_iommu_hw(void)
3351{
3352 struct dmar_drhd_unit *drhd;
3353 struct intel_iommu *iommu = NULL;
3354
3355 for_each_active_iommu(iommu, drhd)
3356 if (iommu->qi)
3357 dmar_reenable_qi(iommu);
3358
Joseph Cihulab7792602011-05-03 00:08:37 -07003359 for_each_iommu(iommu, drhd) {
3360 if (drhd->ignored) {
3361 /*
3362 * we always have to disable PMRs or DMA may fail on
3363 * this device
3364 */
3365 if (force_on)
3366 iommu_disable_protect_mem_regions(iommu);
3367 continue;
3368 }
3369
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003370 iommu_flush_write_buffer(iommu);
3371
3372 iommu_set_root_entry(iommu);
3373
3374 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003375 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003376 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003377 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003378 if (iommu_enable_translation(iommu))
3379 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003380 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003381 }
3382
3383 return 0;
3384}
3385
3386static void iommu_flush_all(void)
3387{
3388 struct dmar_drhd_unit *drhd;
3389 struct intel_iommu *iommu;
3390
3391 for_each_active_iommu(iommu, drhd) {
3392 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003393 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003394 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003395 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003396 }
3397}
3398
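/*
 * Suspend handler: after a global context/IOTLB flush, disable translation
 * on every active IOMMU and save its fault-event control, data and address
 * registers so iommu_resume() can restore them on wakeup.
 */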
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003399static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003400{
3401 struct dmar_drhd_unit *drhd;
3402 struct intel_iommu *iommu = NULL;
3403 unsigned long flag;
3404
3405 for_each_active_iommu(iommu, drhd) {
3406 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3407 GFP_ATOMIC);
3408 if (!iommu->iommu_state)
3409 goto nomem;
3410 }
3411
3412 iommu_flush_all();
3413
3414 for_each_active_iommu(iommu, drhd) {
3415 iommu_disable_translation(iommu);
3416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003417 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003418
3419 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3420 readl(iommu->reg + DMAR_FECTL_REG);
3421 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3422 readl(iommu->reg + DMAR_FEDATA_REG);
3423 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3424 readl(iommu->reg + DMAR_FEADDR_REG);
3425 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3426 readl(iommu->reg + DMAR_FEUADDR_REG);
3427
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003428 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003429 }
3430 return 0;
3431
3432nomem:
3433 for_each_active_iommu(iommu, drhd)
3434 kfree(iommu->iommu_state);
3435
3436 return -ENOMEM;
3437}
3438
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003439static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003440{
3441 struct dmar_drhd_unit *drhd;
3442 struct intel_iommu *iommu = NULL;
3443 unsigned long flag;
3444
3445 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003446 if (force_on)
3447 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3448 else
3449 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003450 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003451 }
3452
3453 for_each_active_iommu(iommu, drhd) {
3454
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003455 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003456
3457 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3458 iommu->reg + DMAR_FECTL_REG);
3459 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3460 iommu->reg + DMAR_FEDATA_REG);
3461 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3462 iommu->reg + DMAR_FEADDR_REG);
3463 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3464 iommu->reg + DMAR_FEUADDR_REG);
3465
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003466 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003467 }
3468
3469 for_each_active_iommu(iommu, drhd)
3470 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003471}
3472
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003473static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003474 .resume = iommu_resume,
3475 .suspend = iommu_suspend,
3476};
3477
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003478static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003479{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003480 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003481}
3482
3483#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003484static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003485#endif /* CONFIG_PM */
3486
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003487LIST_HEAD(dmar_rmrr_units);
3488
3489static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3490{
3491 list_add(&rmrr->list, &dmar_rmrr_units);
3492}
3493
3494
3495int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3496{
3497 struct acpi_dmar_reserved_memory *rmrr;
3498 struct dmar_rmrr_unit *rmrru;
3499
3500 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3501 if (!rmrru)
3502 return -ENOMEM;
3503
3504 rmrru->hdr = header;
3505 rmrr = (struct acpi_dmar_reserved_memory *)header;
3506 rmrru->base_address = rmrr->base_address;
3507 rmrru->end_address = rmrr->end_address;
3508
3509 dmar_register_rmrr_unit(rmrru);
3510 return 0;
3511}
3512
3513static int __init
3514rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3515{
3516 struct acpi_dmar_reserved_memory *rmrr;
3517 int ret;
3518
3519 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3520 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3521 ((void *)rmrr) + rmrr->header.length,
3522 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3523
3524 if (ret || (rmrru->devices_cnt == 0)) {
3525 list_del(&rmrru->list);
3526 kfree(rmrru);
3527 }
3528 return ret;
3529}
3530
3531static LIST_HEAD(dmar_atsr_units);
3532
3533int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3534{
3535 struct acpi_dmar_atsr *atsr;
3536 struct dmar_atsr_unit *atsru;
3537
3538 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3539 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3540 if (!atsru)
3541 return -ENOMEM;
3542
3543 atsru->hdr = hdr;
3544 atsru->include_all = atsr->flags & 0x1;
3545
3546 list_add(&atsru->list, &dmar_atsr_units);
3547
3548 return 0;
3549}
3550
3551static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3552{
3553 int rc;
3554 struct acpi_dmar_atsr *atsr;
3555
3556 if (atsru->include_all)
3557 return 0;
3558
3559 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3560 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3561 (void *)atsr + atsr->header.length,
3562 &atsru->devices_cnt, &atsru->devices,
3563 atsr->segment);
3564 if (rc || !atsru->devices_cnt) {
3565 list_del(&atsru->list);
3566 kfree(atsru);
3567 }
3568
3569 return rc;
3570}
3571
3572int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3573{
3574 int i;
3575 struct pci_bus *bus;
3576 struct acpi_dmar_atsr *atsr;
3577 struct dmar_atsr_unit *atsru;
3578
3579 dev = pci_physfn(dev);
3580
3581 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3582 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3583 if (atsr->segment == pci_domain_nr(dev->bus))
3584 goto found;
3585 }
3586
3587 return 0;
3588
3589found:
3590 for (bus = dev->bus; bus; bus = bus->parent) {
3591 struct pci_dev *bridge = bus->self;
3592
3593 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003594 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003595 return 0;
3596
Yijing Wang62f87c02012-07-24 17:20:03 +08003597 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003598 for (i = 0; i < atsru->devices_cnt; i++)
3599 if (atsru->devices[i] == bridge)
3600 return 1;
3601 break;
3602 }
3603 }
3604
3605 if (atsru->include_all)
3606 return 1;
3607
3608 return 0;
3609}
3610
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003611int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003612{
3613 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3614 struct dmar_atsr_unit *atsr, *atsr_n;
3615 int ret = 0;
3616
3617 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3618 ret = rmrr_parse_dev(rmrr);
3619 if (ret)
3620 return ret;
3621 }
3622
3623 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3624 ret = atsr_parse_dev(atsr);
3625 if (ret)
3626 return ret;
3627 }
3628
3629 return ret;
3630}
3631
Fenghua Yu99dcade2009-11-11 07:23:06 -08003632/*
 3633 * Here we only respond to the action of a device being unbound from its driver.
 3634 *
 3635 * A newly added device is not attached to its DMAR domain here yet; that will
 3636 * happen when the device is mapped to an iova.
3637 */
3638static int device_notifier(struct notifier_block *nb,
3639 unsigned long action, void *data)
3640{
3641 struct device *dev = data;
3642 struct pci_dev *pdev = to_pci_dev(dev);
3643 struct dmar_domain *domain;
3644
David Woodhouse44cd6132009-12-02 10:18:30 +00003645 if (iommu_no_mapping(dev))
3646 return 0;
3647
Fenghua Yu99dcade2009-11-11 07:23:06 -08003648 domain = find_domain(pdev);
3649 if (!domain)
3650 return 0;
3651
Alex Williamsona97590e2011-03-04 14:52:16 -07003652 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003653 domain_remove_one_dev_info(domain, pdev);
3654
Alex Williamsona97590e2011-03-04 14:52:16 -07003655 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3656 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3657 list_empty(&domain->devices))
3658 domain_exit(domain);
3659 }
3660
Fenghua Yu99dcade2009-11-11 07:23:06 -08003661 return 0;
3662}
3663
3664static struct notifier_block device_nb = {
3665 .notifier_call = device_notifier,
3666};
3667
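/*
 * Top-level VT-d initialization: validate the DMAR table and device scopes,
 * honour no_iommu/dmar_disabled (unless a TXT/tboot launch forces VT-d on),
 * build domains and context entries through init_dmars(), and finally
 * install intel_dma_ops and register intel_iommu_ops with the IOMMU core.
 */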
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003668int __init intel_iommu_init(void)
3669{
3670 int ret = 0;
Takao Indoh3a93c842013-04-23 17:35:03 +09003671 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003672
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003673 /* VT-d is required for a TXT/tboot launch, so enforce that */
3674 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003675
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003676 if (dmar_table_init()) {
3677 if (force_on)
3678 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003679 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003680 }
3681
Takao Indoh3a93c842013-04-23 17:35:03 +09003682 /*
3683 * Disable translation if already enabled prior to OS handover.
3684 */
3685 for_each_drhd_unit(drhd) {
3686 struct intel_iommu *iommu;
3687
3688 if (drhd->ignored)
3689 continue;
3690
3691 iommu = drhd->iommu;
3692 if (iommu->gcmd & DMA_GCMD_TE)
3693 iommu_disable_translation(iommu);
3694 }
3695
Suresh Siddhac2c72862011-08-23 17:05:19 -07003696 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003697 if (force_on)
3698 panic("tboot: Failed to initialize DMAR device scope\n");
3699 return -ENODEV;
3700 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003701
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003702 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003703 return -ENODEV;
3704
Joseph Cihula51a63e62011-03-21 11:04:24 -07003705 if (iommu_init_mempool()) {
3706 if (force_on)
3707 panic("tboot: Failed to initialize iommu memory\n");
3708 return -ENODEV;
3709 }
3710
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003711 if (list_empty(&dmar_rmrr_units))
3712 printk(KERN_INFO "DMAR: No RMRR found\n");
3713
3714 if (list_empty(&dmar_atsr_units))
3715 printk(KERN_INFO "DMAR: No ATSR found\n");
3716
Joseph Cihula51a63e62011-03-21 11:04:24 -07003717 if (dmar_init_reserved_ranges()) {
3718 if (force_on)
3719 panic("tboot: Failed to reserve iommu ranges\n");
3720 return -ENODEV;
3721 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003722
3723 init_no_remapping_devices();
3724
Joseph Cihulab7792602011-05-03 00:08:37 -07003725 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003726 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003727 if (force_on)
3728 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003729 printk(KERN_ERR "IOMMU: dmar init failed\n");
3730 put_iova_domain(&reserved_iova_list);
3731 iommu_exit_mempool();
3732 return ret;
3733 }
3734 printk(KERN_INFO
3735 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3736
mark gross5e0d2a62008-03-04 15:22:08 -08003737 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003738#ifdef CONFIG_SWIOTLB
3739 swiotlb = 0;
3740#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003741 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003742
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003743 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003744
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003745 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003746
Fenghua Yu99dcade2009-11-11 07:23:06 -08003747 bus_register_notifier(&pci_bus_type, &device_nb);
3748
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003749 intel_iommu_enabled = 1;
3750
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003751 return 0;
3752}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003753
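/*
 * Clear the context entries of every bridge on the path between @pdev and
 * its upstream PCIe-to-PCI bridge, so no stale translations remain for the
 * dependent devices once @pdev itself has been detached.
 */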
Han, Weidong3199aa62009-02-26 17:31:12 +08003754static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3755 struct pci_dev *pdev)
3756{
3757 struct pci_dev *tmp, *parent;
3758
3759 if (!iommu || !pdev)
3760 return;
3761
3762 /* dependent device detach */
3763 tmp = pci_find_upstream_pcie_bridge(pdev);
3764 /* Secondary interface's bus number and devfn 0 */
3765 if (tmp) {
3766 parent = pdev->bus->self;
3767 while (parent != tmp) {
3768 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003769 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003770 parent = parent->bus->self;
3771 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003772 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003773 iommu_detach_dev(iommu,
3774 tmp->subordinate->number, 0);
3775 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003776 iommu_detach_dev(iommu, tmp->bus->number,
3777 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003778 }
3779}
3780
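/*
 * Detach one PCI device from @domain: unlink its device_domain_info, disable
 * its device-IOTLB, clear the context entries for the device and any bridges
 * it sits behind, and, if no other device on the same IOMMU still belongs to
 * the domain, drop the domain's reference to that IOMMU.
 */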
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003781static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003782 struct pci_dev *pdev)
3783{
Yijing Wangbca2b912013-10-31 17:26:04 +08003784 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08003785 struct intel_iommu *iommu;
3786 unsigned long flags;
3787 int found = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +08003788
David Woodhouse276dbf992009-04-04 01:45:37 +01003789 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3790 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003791 if (!iommu)
3792 return;
3793
3794 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08003795 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
Mike Habeck8519dc42011-05-28 13:15:07 -05003796 if (info->segment == pci_domain_nr(pdev->bus) &&
3797 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003798 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003799 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003800 spin_unlock_irqrestore(&device_domain_lock, flags);
3801
Yu Zhao93a23a72009-05-18 13:51:37 +08003802 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003803 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003804 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003805 free_devinfo_mem(info);
3806
3807 spin_lock_irqsave(&device_domain_lock, flags);
3808
3809 if (found)
3810 break;
3811 else
3812 continue;
3813 }
3814
 3815		/* if there are no other devices under the same iommu
 3816		 * owned by this domain, clear this iommu in iommu_bmp,
 3817		 * and update the iommu count and coherency
3818 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003819 if (iommu == device_to_iommu(info->segment, info->bus,
3820 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003821 found = 1;
3822 }
3823
Roland Dreier3e7abe22011-07-20 06:22:21 -07003824 spin_unlock_irqrestore(&device_domain_lock, flags);
3825
Weidong Hanc7151a82008-12-08 22:51:37 +08003826 if (found == 0) {
3827 unsigned long tmp_flags;
3828 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003829 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003830 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003831 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003832 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003833
Alex Williamson9b4554b2011-05-24 12:19:04 -04003834 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3835 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3836 spin_lock_irqsave(&iommu->lock, tmp_flags);
3837 clear_bit(domain->id, iommu->domain_ids);
3838 iommu->domains[domain->id] = NULL;
3839 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3840 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003841 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003842}
3843
3844static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3845{
3846 struct device_domain_info *info;
3847 struct intel_iommu *iommu;
3848 unsigned long flags1, flags2;
3849
3850 spin_lock_irqsave(&device_domain_lock, flags1);
3851 while (!list_empty(&domain->devices)) {
3852 info = list_entry(domain->devices.next,
3853 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01003854 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003855 spin_unlock_irqrestore(&device_domain_lock, flags1);
3856
Yu Zhao93a23a72009-05-18 13:51:37 +08003857 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003858 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003859 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003860 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003861
3862 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003863 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003864 */
3865 spin_lock_irqsave(&domain->iommu_lock, flags2);
3866 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003867 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003868 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003869 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003870 }
3871 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3872
3873 free_devinfo_mem(info);
3874 spin_lock_irqsave(&device_domain_lock, flags1);
3875 }
3876 spin_unlock_irqrestore(&device_domain_lock, flags1);
3877}
3878
Weidong Han5e98c4b2008-12-08 23:03:27 +08003879/* domain id for a virtual machine; it won't be set in a context entry */
3880static unsigned long vm_domid;
3881
3882static struct dmar_domain *iommu_alloc_vm_domain(void)
3883{
3884 struct dmar_domain *domain;
3885
3886 domain = alloc_domain_mem();
3887 if (!domain)
3888 return NULL;
3889
3890 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003891 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003892 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003893 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3894
3895 return domain;
3896}
3897
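/*
 * Initialize a freshly allocated VM domain for use through the generic IOMMU
 * API: set up its IOVA allocator and lock, reserve the special address
 * ranges, derive the adjusted address width from the requested guest width,
 * and allocate the top-level page directory.
 */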
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003898static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003899{
3900 int adjust_width;
3901
3902 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003903 spin_lock_init(&domain->iommu_lock);
3904
3905 domain_reserve_special_ranges(domain);
3906
3907 /* calculate AGAW */
3908 domain->gaw = guest_width;
3909 adjust_width = guestwidth_to_adjustwidth(guest_width);
3910 domain->agaw = width_to_agaw(adjust_width);
3911
3912 INIT_LIST_HEAD(&domain->devices);
3913
3914 domain->iommu_count = 0;
3915 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003916 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003917 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003918 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003919 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003920
3921 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003922 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003923 if (!domain->pgd)
3924 return -ENOMEM;
3925 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3926 return 0;
3927}
3928
3929static void iommu_free_vm_domain(struct dmar_domain *domain)
3930{
3931 unsigned long flags;
3932 struct dmar_drhd_unit *drhd;
3933 struct intel_iommu *iommu;
3934 unsigned long i;
3935 unsigned long ndomains;
3936
3937 for_each_drhd_unit(drhd) {
3938 if (drhd->ignored)
3939 continue;
3940 iommu = drhd->iommu;
3941
3942 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003943 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003944 if (iommu->domains[i] == domain) {
3945 spin_lock_irqsave(&iommu->lock, flags);
3946 clear_bit(i, iommu->domain_ids);
3947 iommu->domains[i] = NULL;
3948 spin_unlock_irqrestore(&iommu->lock, flags);
3949 break;
3950 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003951 }
3952 }
3953}
3954
3955static void vm_domain_exit(struct dmar_domain *domain)
3956{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003957	/* Domain 0 is reserved, so don't process it */
3958 if (!domain)
3959 return;
3960
3961 vm_domain_remove_all_dev_info(domain);
3962 /* destroy iovas */
3963 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003964
3965 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003966 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003967
3968 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003969 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003970
3971 iommu_free_vm_domain(domain);
3972 free_domain_mem(domain);
3973}
3974
Joerg Roedel5d450802008-12-03 14:52:32 +01003975static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003976{
Joerg Roedel5d450802008-12-03 14:52:32 +01003977 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003978
Joerg Roedel5d450802008-12-03 14:52:32 +01003979 dmar_domain = iommu_alloc_vm_domain();
3980 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003981 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003982 "intel_iommu_domain_init: dmar_domain == NULL\n");
3983 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003984 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003985 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003986 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003987 "intel_iommu_domain_init() failed\n");
3988 vm_domain_exit(dmar_domain);
3989 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003990 }
Allen Kay8140a952011-10-14 12:32:17 -07003991 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003992 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003993
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003994 domain->geometry.aperture_start = 0;
3995 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3996 domain->geometry.force_aperture = true;
3997
Joerg Roedel5d450802008-12-03 14:52:32 +01003998 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003999}
Kay, Allen M38717942008-09-09 18:37:29 +03004000
Joerg Roedel5d450802008-12-03 14:52:32 +01004001static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004002{
Joerg Roedel5d450802008-12-03 14:52:32 +01004003 struct dmar_domain *dmar_domain = domain->priv;
4004
4005 domain->priv = NULL;
4006 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004007}
Kay, Allen M38717942008-09-09 18:37:29 +03004008
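/*
 * iommu_ops->attach_dev callback: detach the device from any domain it is
 * already mapped into, verify that this IOMMU's address width covers the
 * domain's highest mapped address (trimming extra page-table levels when the
 * domain is deeper than the hardware supports), then add the device to the
 * domain with multi-level translation.
 */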
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

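/*
 * Map [iova, iova + size) to the host physical range starting at hpa.
 * IOMMU_READ/IOMMU_WRITE are translated into DMA PTE permission bits, and
 * IOMMU_CACHE is honoured only when the hardware supports snoop control.
 * The mapping is rejected if it would exceed the domain's address width.
 */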
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

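/*
 * Unmap the range starting at iova.  dma_pte_clear_range() reports the
 * page order of the range it cleared; the byte count handed back lets the
 * IOMMU core account for what was actually unmapped.
 */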
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

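/*
 * Walk the domain's page tables to translate an IOVA back to a host
 * physical address; returns 0 if no mapping is present.
 */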
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

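/*
 * Report optional capabilities to the IOMMU core: whether this domain can
 * enforce cache coherency (snoop control) and whether interrupt remapping
 * is currently enabled.
 */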
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

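/*
 * Build the IOMMU group for a newly discovered device.  Devices are placed
 * in the same group as any alias they can DMA through (quirked DMA sources,
 * legacy PCI bridges) and as any upstream device whose lack of ACS isolation
 * (source validation, request/completion redirect, upstream forwarding)
 * would let peer-to-peer traffic bypass the IOMMU.
 */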
static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

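/*
 * Illustrative sketch only (not part of this driver): external users such
 * as device-assignment code reach the callbacks above through the generic
 * IOMMU API, along these lines:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	ret = iommu_attach_device(dom, &pdev->dev);
 *	ret = iommu_map(dom, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 *
 * The variables (pdev, iova, phys, size) are placeholders; the real call
 * sites live in the IOMMU API consumers, not here.
 */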
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

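/*
 * GGC is the graphics control register in PCI config space on these
 * chipsets; the field masked above describes how much memory the BIOS
 * reserved for the GTT and whether a VT-d shadow GTT was set aside.
 * (Interpretation inferred from the quirk below and the macro names.)
 */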
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}