Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040027#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070028#include <linux/slab.h>
29#include <linux/irq.h>
30#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070031#include <linux/spinlock.h>
32#include <linux/pci.h>
33#include <linux/dmar.h>
34#include <linux/dma-mapping.h>
35#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080036#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030037#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010038#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030039#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010040#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070041#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100042#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020043#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080044#include <linux/memblock.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070045#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090047#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070048
Joerg Roedel078e1ee2012-09-26 12:44:43 +020049#include "irq_remapping.h"
Varun Sethi61e015a2013-04-23 10:05:24 +053050#include "pci.h"
Joerg Roedel078e1ee2012-09-26 12:44:43 +020051
Fenghua Yu5b6985c2008-10-16 18:02:32 -070052#define ROOT_SIZE VTD_PAGE_SIZE
53#define CONTEXT_SIZE VTD_PAGE_SIZE
54
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070057#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070058
59#define IOAPIC_RANGE_START (0xfee00000)
60#define IOAPIC_RANGE_END (0xfeefffff)
61#define IOVA_START_ADDR (0x1000)
62
63#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
64
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070065#define MAX_AGAW_WIDTH 64
66
David Woodhouse2ebe3152009-09-19 07:34:04 -070067#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
68#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
69
70/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
71 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
72#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
73 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
74#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
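/*
 * Worked example for the macros above (illustrative, assuming gaw == 48 and
 * VTD_PAGE_SHIFT == 12): __DOMAIN_MAX_PFN(48) = (1ULL << 36) - 1 =
 * 0xFFFFFFFFF and __DOMAIN_MAX_ADDR(48) = (1ULL << 48) - 1.  On 64-bit,
 * DOMAIN_MAX_PFN(48) keeps that 36-bit value; on 32-bit it is clamped to
 * ULONG_MAX so PFNs always fit in an unsigned long.  DOMAIN_MAX_ADDR(48) is
 * the unclamped pfn shifted back up, i.e. 0xFFFFFFFFF000.
 */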
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070075
Mark McLoughlinf27be032008-11-20 15:49:43 +000076#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070077#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070078#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080079
Andrew Mortondf08cdc2010-09-22 13:05:11 -070080/* page table handling */
81#define LEVEL_STRIDE (9)
82#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
83
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020084/*
85 * This bitmap is used to advertise the page sizes our hardware support
86 * to the IOMMU core, which will then use this information to split
87 * physically contiguous memory regions it is mapping into page sizes
88 * that we support.
89 *
90 * Traditionally the IOMMU core just handed us the mappings directly,
91 * after making sure the size is an order of a 4KiB page and that the
92 * mapping has natural alignment.
93 *
94 * To retain this behavior, we currently advertise that we support
95 * all page sizes that are an order of 4KiB.
96 *
97 * If at some point we'd like to utilize the IOMMU core's new behavior,
98 * we could change this to advertise the real page sizes we support.
99 */
100#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
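/*
 * Illustrative reading of the bitmap above: bit k set means a page size of
 * (1 << k) bytes is supported.  ~0xFFFUL clears bits 0-11 and sets every
 * higher bit, i.e. 4KiB, 8KiB, 16KiB, ... -- every power-of-two size of
 * 4KiB or larger -- which matches the legacy behaviour described above.
 */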
101
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700102static inline int agaw_to_level(int agaw)
103{
104 return agaw + 2;
105}
106
107static inline int agaw_to_width(int agaw)
108{
109 return 30 + agaw * LEVEL_STRIDE;
110}
111
112static inline int width_to_agaw(int width)
113{
114 return (width - 30) / LEVEL_STRIDE;
115}
116
117static inline unsigned int level_to_offset_bits(int level)
118{
119 return (level - 1) * LEVEL_STRIDE;
120}
121
122static inline int pfn_level_offset(unsigned long pfn, int level)
123{
124 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
125}
126
127static inline unsigned long level_mask(int level)
128{
129 return -1UL << level_to_offset_bits(level);
130}
131
132static inline unsigned long level_size(int level)
133{
134 return 1UL << level_to_offset_bits(level);
135}
136
137static inline unsigned long align_to_level(unsigned long pfn, int level)
138{
139 return (pfn + level_size(level) - 1) & level_mask(level);
140}
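/*
 * Worked example of the helpers above (illustrative, for the default 48-bit
 * domain width): width_to_agaw(48) = (48 - 30) / 9 = 2, agaw_to_level(2) = 4
 * (a four-level page table) and agaw_to_width(2) = 48.  Each level indexes
 * LEVEL_STRIDE = 9 bits of the pfn: level_to_offset_bits() gives 0, 9, 18 and
 * 27 for levels 1-4, pfn_level_offset() extracts the corresponding 9-bit
 * index, and level_size(2) = 512 pfns (2MiB of IOVA space per level-2 entry).
 */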
David Woodhousefd18de52009-05-10 23:57:41 +0100141
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100142static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
143{
144 return 1 << ((lvl - 1) * LEVEL_STRIDE);
145}
146
David Woodhousedd4e8312009-06-27 16:21:20 +0100147/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
148 are never going to work. */
149static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
150{
151 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
152}
153
154static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
155{
156 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
157}
158static inline unsigned long page_to_dma_pfn(struct page *pg)
159{
160 return mm_to_dma_pfn(page_to_pfn(pg));
161}
162static inline unsigned long virt_to_dma_pfn(void *p)
163{
164 return page_to_dma_pfn(virt_to_page(p));
165}
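/*
 * Example of the pfn conversions above: VTD_PAGE_SHIFT is always 12, so with
 * the usual x86 PAGE_SHIFT of 12 the two pfn spaces are identical and the
 * shifts are by zero.  If the kernel were built with larger CPU pages
 * (hypothetically PAGE_SHIFT == 14), one mm pfn would correspond to four
 * consecutive DMA (VT-d) pfns, which is why the conversions are kept explicit.
 */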
166
Weidong Hand9630fe2008-12-08 11:06:32 +0800167/* global iommu list, set NULL for ignored DMAR units */
168static struct intel_iommu **g_iommus;
169
David Woodhousee0fc7e02009-09-30 09:12:17 -0700170static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000171static int rwbf_quirk;
172
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000173/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700174 * set to 1 to panic the kernel if VT-d can't be successfully enabled
175 * (used when kernel is launched w/ TXT)
176 */
177static int force_on = 0;
178
179/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000180 * 0: Present
181 * 1-11: Reserved
182 * 12-63: Context Ptr (12 - (haw-1))
183 * 64-127: Reserved
184 */
185struct root_entry {
186 u64 val;
187 u64 rsvd1;
188};
189#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
190static inline bool root_present(struct root_entry *root)
191{
192 return (root->val & 1);
193}
194static inline void set_root_present(struct root_entry *root)
195{
196 root->val |= 1;
197}
198static inline void set_root_value(struct root_entry *root, unsigned long value)
199{
200 root->val |= value & VTD_PAGE_MASK;
201}
202
203static inline struct context_entry *
204get_context_addr_from_root(struct root_entry *root)
205{
206 return (struct context_entry *)
207 (root_present(root)?phys_to_virt(
208 root->val & VTD_PAGE_MASK) :
209 NULL);
210}
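/*
 * Illustrative lookup using the helpers above: the root table holds
 * ROOT_ENTRY_NR == 4096 / 16 == 256 entries, one per PCI bus number.  For a
 * (hypothetical) device at 00:1f.2, root_entry[0x00], if present, points to a
 * 4KiB context table, and entry 0xfa (devfn for device 0x1f, function 2) in
 * that table is the context entry used for translation.
 */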
211
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000212/*
213 * low 64 bits:
214 * 0: present
215 * 1: fault processing disable
216 * 2-3: translation type
217 * 12-63: address space root
218 * high 64 bits:
219 * 0-2: address width
 220 * 3-6: avail
221 * 8-23: domain id
222 */
223struct context_entry {
224 u64 lo;
225 u64 hi;
226};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000227
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000228static inline bool context_present(struct context_entry *context)
229{
230 return (context->lo & 1);
231}
232static inline void context_set_present(struct context_entry *context)
233{
234 context->lo |= 1;
235}
236
237static inline void context_set_fault_enable(struct context_entry *context)
238{
239 context->lo &= (((u64)-1) << 2) | 1;
240}
241
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000242static inline void context_set_translation_type(struct context_entry *context,
243 unsigned long value)
244{
245 context->lo &= (((u64)-1) << 4) | 3;
246 context->lo |= (value & 3) << 2;
247}
248
249static inline void context_set_address_root(struct context_entry *context,
250 unsigned long value)
251{
252 context->lo |= value & VTD_PAGE_MASK;
253}
254
255static inline void context_set_address_width(struct context_entry *context,
256 unsigned long value)
257{
258 context->hi |= value & 7;
259}
260
261static inline void context_set_domain_id(struct context_entry *context,
262 unsigned long value)
263{
264 context->hi |= (value & ((1 << 16) - 1)) << 8;
265}
266
267static inline void context_clear_entry(struct context_entry *context)
268{
269 context->lo = 0;
270 context->hi = 0;
271}
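/*
 * Example: to wire a domain into a context entry, the accessors above are
 * typically used roughly in this order (sketch only; CONTEXT_TT_MULTI_LEVEL
 * is assumed from linux/intel-iommu.h, and the real setup code also handles
 * locking and cache flushing):
 *
 *	context_set_domain_id(context, domain->id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */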
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000272
Mark McLoughlin622ba122008-11-20 15:49:46 +0000273/*
274 * 0: readable
275 * 1: writable
276 * 2-6: reserved
277 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800278 * 8-10: available
279 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000280 * 12-63: Host physical address
281 */
282struct dma_pte {
283 u64 val;
284};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000285
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000286static inline void dma_clear_pte(struct dma_pte *pte)
287{
288 pte->val = 0;
289}
290
291static inline void dma_set_pte_readable(struct dma_pte *pte)
292{
293 pte->val |= DMA_PTE_READ;
294}
295
296static inline void dma_set_pte_writable(struct dma_pte *pte)
297{
298 pte->val |= DMA_PTE_WRITE;
299}
300
Sheng Yang9cf066972009-03-18 15:33:07 +0800301static inline void dma_set_pte_snp(struct dma_pte *pte)
302{
303 pte->val |= DMA_PTE_SNP;
304}
305
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000306static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
307{
308 pte->val = (pte->val & ~3) | (prot & 3);
309}
310
311static inline u64 dma_pte_addr(struct dma_pte *pte)
312{
David Woodhousec85994e2009-07-01 19:21:24 +0100313#ifdef CONFIG_64BIT
314 return pte->val & VTD_PAGE_MASK;
315#else
316 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100317 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100318#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000319}
320
David Woodhousedd4e8312009-06-27 16:21:20 +0100321static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000322{
David Woodhousedd4e8312009-06-27 16:21:20 +0100323 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000324}
325
326static inline bool dma_pte_present(struct dma_pte *pte)
327{
328 return (pte->val & 3) != 0;
329}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000330
Allen Kay4399c8b2011-10-14 12:32:46 -0700331static inline bool dma_pte_superpage(struct dma_pte *pte)
332{
333 return (pte->val & (1 << 7));
334}
335
David Woodhouse75e6bf92009-07-02 11:21:16 +0100336static inline int first_pte_in_page(struct dma_pte *pte)
337{
338 return !((unsigned long)pte & ~VTD_PAGE_MASK);
339}
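/*
 * Example PTE value using the layout and helpers above (illustrative): a 4KiB
 * mapping of host pfn 0x12345 with read and write permission is built as
 *
 *	dma_set_pte_pfn(pte, 0x12345);
 *	dma_set_pte_readable(pte);
 *	dma_set_pte_writable(pte);
 *
 * giving pte->val == 0x12345003: bits 12-63 hold the host address, bit 0 is
 * DMA_PTE_READ and bit 1 is DMA_PTE_WRITE.  Bit 7 would mark a superpage and
 * bit 11 the snoop behaviour.
 */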
340
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700341/*
 342 * This domain is a static identity mapping domain.
 343 * 1. This domain creates a static 1:1 mapping to all usable memory.
 344 * 2. It maps to each iommu if successful.
 345 * 3. Each iommu maps to this domain if successful.
346 */
David Woodhouse19943b02009-08-04 16:19:20 +0100347static struct dmar_domain *si_domain;
348static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700349
Weidong Han3b5410e2008-12-08 09:17:15 +0800350/* devices under the same p2p bridge are owned in one domain */
Mike Daycdc7b832008-12-12 17:16:30 +0100351#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
Weidong Han3b5410e2008-12-08 09:17:15 +0800352
Weidong Han1ce28fe2008-12-08 16:35:39 +0800353/* domain represents a virtual machine, more than one device
354 * across iommus may be owned in one domain, e.g. kvm guest.
355 */
356#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
357
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700358/* si_domain contains multiple devices */
359#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
360
Mike Travis1b198bb2012-03-05 15:05:16 -0800361/* define the limit of IOMMUs supported in each domain */
362#ifdef CONFIG_X86
363# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
364#else
365# define IOMMU_UNITS_SUPPORTED 64
366#endif
367
Mark McLoughlin99126f72008-11-20 15:49:47 +0000368struct dmar_domain {
369 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700370 int nid; /* node id */
Mike Travis1b198bb2012-03-05 15:05:16 -0800371 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
 372 /* bitmap of iommus this domain uses */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000373
374 struct list_head devices; /* all devices' list */
375 struct iova_domain iovad; /* iova's that belong to this domain */
376
377 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000378 int gaw; /* max guest address width */
379
380 /* adjusted guest address width, 0 is level 2 30-bit */
381 int agaw;
382
Weidong Han3b5410e2008-12-08 09:17:15 +0800383 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800384
385 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800386 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800387 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100388 int iommu_superpage;/* Level of superpages supported:
389 0 == 4KiB (no superpages), 1 == 2MiB,
390 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800391 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800392 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000393};
394
Mark McLoughlina647dac2008-11-20 15:49:48 +0000395/* PCI domain-device relationship */
396struct device_domain_info {
397 struct list_head link; /* link to domain siblings */
398 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100399 int segment; /* PCI domain */
400 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000401 u8 devfn; /* PCI devfn number */
Stefan Assmann45e829e2009-12-03 06:49:24 -0500402 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800403 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000404 struct dmar_domain *domain; /* pointer to domain */
405};
406
mark gross5e0d2a62008-03-04 15:22:08 -0800407static void flush_unmaps_timeout(unsigned long data);
408
409DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
410
mark gross80b20dd2008-04-18 13:53:58 -0700411#define HIGH_WATER_MARK 250
412struct deferred_flush_tables {
413 int next;
414 struct iova *iova[HIGH_WATER_MARK];
415 struct dmar_domain *domain[HIGH_WATER_MARK];
416};
417
418static struct deferred_flush_tables *deferred_flush;
419
mark gross5e0d2a62008-03-04 15:22:08 -0800420/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800421static int g_num_of_iommus;
422
423static DEFINE_SPINLOCK(async_umap_flush_lock);
424static LIST_HEAD(unmaps_to_do);
425
426static int timer_on;
427static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800428
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700429static void domain_remove_dev_info(struct dmar_domain *domain);
430
Suresh Siddhad3f13812011-08-23 17:05:25 -0700431#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800432int dmar_disabled = 0;
433#else
434int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700435#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800436
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200437int intel_iommu_enabled = 0;
438EXPORT_SYMBOL_GPL(intel_iommu_enabled);
439
David Woodhouse2d9e6672010-06-15 10:57:57 +0100440static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700441static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800442static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100443static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700444
David Woodhousec0771df2011-10-14 20:59:46 +0100445int intel_iommu_gfx_mapped;
446EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
447
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700448#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449static DEFINE_SPINLOCK(device_domain_lock);
450static LIST_HEAD(device_domain_list);
451
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100452static struct iommu_ops intel_iommu_ops;
453
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700454static int __init intel_iommu_setup(char *str)
455{
456 if (!str)
457 return -EINVAL;
458 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800459 if (!strncmp(str, "on", 2)) {
460 dmar_disabled = 0;
461 printk(KERN_INFO "Intel-IOMMU: enabled\n");
462 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700463 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800464 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700465 } else if (!strncmp(str, "igfx_off", 8)) {
466 dmar_map_gfx = 0;
467 printk(KERN_INFO
468 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700469 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800470 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700471 "Intel-IOMMU: Forcing DAC for PCI devices\n");
472 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800473 } else if (!strncmp(str, "strict", 6)) {
474 printk(KERN_INFO
475 "Intel-IOMMU: disable batched IOTLB flush\n");
476 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100477 } else if (!strncmp(str, "sp_off", 6)) {
478 printk(KERN_INFO
479 "Intel-IOMMU: disable supported super page\n");
480 intel_iommu_superpage = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700481 }
482
483 str += strcspn(str, ",");
484 while (*str == ',')
485 str++;
486 }
487 return 0;
488}
489__setup("intel_iommu=", intel_iommu_setup);
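/*
 * Usage example for the parser above, on the kernel command line
 * (illustrative):
 *
 *	intel_iommu=on
 *	intel_iommu=on,strict
 *	intel_iommu=on,igfx_off,sp_off
 *	intel_iommu=off
 *
 * Multiple options may be combined with commas; unrecognised words are
 * silently skipped.
 */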
490
491static struct kmem_cache *iommu_domain_cache;
492static struct kmem_cache *iommu_devinfo_cache;
493static struct kmem_cache *iommu_iova_cache;
494
Suresh Siddha4c923d42009-10-02 11:01:24 -0700495static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700496{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700497 struct page *page;
498 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700499
Suresh Siddha4c923d42009-10-02 11:01:24 -0700500 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
501 if (page)
502 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700503 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700504}
505
506static inline void free_pgtable_page(void *vaddr)
507{
508 free_page((unsigned long)vaddr);
509}
510
511static inline void *alloc_domain_mem(void)
512{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900513 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700514}
515
Kay, Allen M38717942008-09-09 18:37:29 +0300516static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700517{
518 kmem_cache_free(iommu_domain_cache, vaddr);
519}
520
521static inline void * alloc_devinfo_mem(void)
522{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900523 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700524}
525
526static inline void free_devinfo_mem(void *vaddr)
527{
528 kmem_cache_free(iommu_devinfo_cache, vaddr);
529}
530
531struct iova *alloc_iova_mem(void)
532{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900533 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700534}
535
536void free_iova_mem(struct iova *iova)
537{
538 kmem_cache_free(iommu_iova_cache, iova);
539}
540
Weidong Han1b573682008-12-08 15:34:06 +0800541
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700542static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800543{
544 unsigned long sagaw;
545 int agaw = -1;
546
547 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700548 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800549 agaw >= 0; agaw--) {
550 if (test_bit(agaw, &sagaw))
551 break;
552 }
553
554 return agaw;
555}
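/*
 * Worked example (illustrative): cap_sagaw() returns a bitmap of supported
 * adjusted guest address widths, with bit n meaning agaw n is supported
 * (bit 1 = 39-bit/3-level, bit 2 = 48-bit/4-level).  For max_gaw == 48 the
 * loop starts at agaw = width_to_agaw(48) = 2; if bit 2 is set a 4-level
 * table is used, otherwise it falls back to agaw 1 (39 bits), and -1 is
 * returned if nothing suitable is supported.
 */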
556
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700557/*
558 * Calculate max SAGAW for each iommu.
559 */
560int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
561{
562 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
563}
564
565/*
566 * calculate agaw for each iommu.
567 * "SAGAW" may be different across iommus, use a default agaw, and
 568 * get a smaller supported agaw for iommus that don't support the default agaw.
569 */
570int iommu_calculate_agaw(struct intel_iommu *iommu)
571{
572 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
573}
574
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700575/* This function only returns a single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800576static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
577{
578 int iommu_id;
579
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700580 /* si_domain and vm domain should not get here. */
Weidong Han1ce28fe2008-12-08 16:35:39 +0800581 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700582 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
Weidong Han1ce28fe2008-12-08 16:35:39 +0800583
Mike Travis1b198bb2012-03-05 15:05:16 -0800584 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800585 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
586 return NULL;
587
588 return g_iommus[iommu_id];
589}
590
Weidong Han8e6040972008-12-08 15:49:06 +0800591static void domain_update_iommu_coherency(struct dmar_domain *domain)
592{
593 int i;
594
Alex Williamson2e12bc22011-11-11 17:26:44 -0700595 i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
596
597 domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800598
Mike Travis1b198bb2012-03-05 15:05:16 -0800599 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Weidong Han8e6040972008-12-08 15:49:06 +0800600 if (!ecap_coherent(g_iommus[i]->ecap)) {
601 domain->iommu_coherency = 0;
602 break;
603 }
Weidong Han8e6040972008-12-08 15:49:06 +0800604 }
605}
606
Sheng Yang58c610b2009-03-18 15:33:05 +0800607static void domain_update_iommu_snooping(struct dmar_domain *domain)
608{
609 int i;
610
611 domain->iommu_snooping = 1;
612
Mike Travis1b198bb2012-03-05 15:05:16 -0800613 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Sheng Yang58c610b2009-03-18 15:33:05 +0800614 if (!ecap_sc_support(g_iommus[i]->ecap)) {
615 domain->iommu_snooping = 0;
616 break;
617 }
Sheng Yang58c610b2009-03-18 15:33:05 +0800618 }
619}
620
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100621static void domain_update_iommu_superpage(struct dmar_domain *domain)
622{
Allen Kay8140a952011-10-14 12:32:17 -0700623 struct dmar_drhd_unit *drhd;
624 struct intel_iommu *iommu = NULL;
625 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100626
627 if (!intel_iommu_superpage) {
628 domain->iommu_superpage = 0;
629 return;
630 }
631
Allen Kay8140a952011-10-14 12:32:17 -0700632 /* set iommu_superpage to the smallest common denominator */
633 for_each_active_iommu(iommu, drhd) {
634 mask &= cap_super_page_val(iommu->cap);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100635 if (!mask) {
636 break;
637 }
638 }
639 domain->iommu_superpage = fls(mask);
640}
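/*
 * Example: cap_super_page_val() reports 2MiB, 1GiB, 512GiB and 1TiB support
 * in bits 0-3, and the loop above ANDs those bits across all active IOMMUs.
 * If every unit supports 2MiB and 1GiB pages the mask is 0x3 and
 * fls(0x3) == 2, i.e. superpages up to 1GiB; a mask of 0 leaves the domain
 * with 4KiB pages only, matching the iommu_superpage encoding in
 * struct dmar_domain.
 */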
641
Sheng Yang58c610b2009-03-18 15:33:05 +0800642/* Some capabilities may be different across iommus */
643static void domain_update_iommu_cap(struct dmar_domain *domain)
644{
645 domain_update_iommu_coherency(domain);
646 domain_update_iommu_snooping(domain);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100647 domain_update_iommu_superpage(domain);
Sheng Yang58c610b2009-03-18 15:33:05 +0800648}
649
David Woodhouse276dbf992009-04-04 01:45:37 +0100650static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800651{
652 struct dmar_drhd_unit *drhd = NULL;
653 int i;
654
655 for_each_drhd_unit(drhd) {
656 if (drhd->ignored)
657 continue;
David Woodhouse276dbf992009-04-04 01:45:37 +0100658 if (segment != drhd->segment)
659 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800660
David Woodhouse924b6232009-04-04 00:39:25 +0100661 for (i = 0; i < drhd->devices_cnt; i++) {
Dirk Hohndel288e4872009-01-11 15:33:51 +0000662 if (drhd->devices[i] &&
663 drhd->devices[i]->bus->number == bus &&
Weidong Hanc7151a82008-12-08 22:51:37 +0800664 drhd->devices[i]->devfn == devfn)
665 return drhd->iommu;
David Woodhouse4958c5d2009-04-06 13:30:01 -0700666 if (drhd->devices[i] &&
667 drhd->devices[i]->subordinate &&
David Woodhouse924b6232009-04-04 00:39:25 +0100668 drhd->devices[i]->subordinate->number <= bus &&
Yinghai Lub918c622012-05-17 18:51:11 -0700669 drhd->devices[i]->subordinate->busn_res.end >= bus)
David Woodhouse924b6232009-04-04 00:39:25 +0100670 return drhd->iommu;
671 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800672
673 if (drhd->include_all)
674 return drhd->iommu;
675 }
676
677 return NULL;
678}
679
Weidong Han5331fe62008-12-08 23:00:00 +0800680static void domain_flush_cache(struct dmar_domain *domain,
681 void *addr, int size)
682{
683 if (!domain->iommu_coherency)
684 clflush_cache_range(addr, size);
685}
686
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700687/* Gets context entry for a given bus and devfn */
688static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
689 u8 bus, u8 devfn)
690{
691 struct root_entry *root;
692 struct context_entry *context;
693 unsigned long phy_addr;
694 unsigned long flags;
695
696 spin_lock_irqsave(&iommu->lock, flags);
697 root = &iommu->root_entry[bus];
698 context = get_context_addr_from_root(root);
699 if (!context) {
Suresh Siddha4c923d42009-10-02 11:01:24 -0700700 context = (struct context_entry *)
701 alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700702 if (!context) {
703 spin_unlock_irqrestore(&iommu->lock, flags);
704 return NULL;
705 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700706 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700707 phy_addr = virt_to_phys((void *)context);
708 set_root_value(root, phy_addr);
709 set_root_present(root);
710 __iommu_flush_cache(iommu, root, sizeof(*root));
711 }
712 spin_unlock_irqrestore(&iommu->lock, flags);
713 return &context[devfn];
714}
715
716static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
717{
718 struct root_entry *root;
719 struct context_entry *context;
720 int ret;
721 unsigned long flags;
722
723 spin_lock_irqsave(&iommu->lock, flags);
724 root = &iommu->root_entry[bus];
725 context = get_context_addr_from_root(root);
726 if (!context) {
727 ret = 0;
728 goto out;
729 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000730 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700731out:
732 spin_unlock_irqrestore(&iommu->lock, flags);
733 return ret;
734}
735
736static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
737{
738 struct root_entry *root;
739 struct context_entry *context;
740 unsigned long flags;
741
742 spin_lock_irqsave(&iommu->lock, flags);
743 root = &iommu->root_entry[bus];
744 context = get_context_addr_from_root(root);
745 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000746 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700747 __iommu_flush_cache(iommu, &context[devfn], \
748 sizeof(*context));
749 }
750 spin_unlock_irqrestore(&iommu->lock, flags);
751}
752
753static void free_context_table(struct intel_iommu *iommu)
754{
755 struct root_entry *root;
756 int i;
757 unsigned long flags;
758 struct context_entry *context;
759
760 spin_lock_irqsave(&iommu->lock, flags);
761 if (!iommu->root_entry) {
762 goto out;
763 }
764 for (i = 0; i < ROOT_ENTRY_NR; i++) {
765 root = &iommu->root_entry[i];
766 context = get_context_addr_from_root(root);
767 if (context)
768 free_pgtable_page(context);
769 }
770 free_pgtable_page(iommu->root_entry);
771 iommu->root_entry = NULL;
772out:
773 spin_unlock_irqrestore(&iommu->lock, flags);
774}
775
David Woodhouseb026fd22009-06-28 10:37:25 +0100776static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
Allen Kay4399c8b2011-10-14 12:32:46 -0700777 unsigned long pfn, int target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700778{
David Woodhouseb026fd22009-06-28 10:37:25 +0100779 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700780 struct dma_pte *parent, *pte = NULL;
781 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700782 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700783
784 BUG_ON(!domain->pgd);
David Woodhouseb026fd22009-06-28 10:37:25 +0100785 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700786 parent = domain->pgd;
787
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700788 while (level > 0) {
789 void *tmp_page;
790
David Woodhouseb026fd22009-06-28 10:37:25 +0100791 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700792 pte = &parent[offset];
Allen Kay4399c8b2011-10-14 12:32:46 -0700793 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100794 break;
795 if (level == target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700796 break;
797
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000798 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100799 uint64_t pteval;
800
Suresh Siddha4c923d42009-10-02 11:01:24 -0700801 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700802
David Woodhouse206a73c12009-07-01 19:30:28 +0100803 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700804 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100805
David Woodhousec85994e2009-07-01 19:21:24 +0100806 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400807 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
David Woodhousec85994e2009-07-01 19:21:24 +0100808 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
809 /* Someone else set it while we were thinking; use theirs. */
810 free_pgtable_page(tmp_page);
811 } else {
812 dma_pte_addr(pte);
813 domain_flush_cache(domain, pte, sizeof(*pte));
814 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700815 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000816 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700817 level--;
818 }
819
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700820 return pte;
821}
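/*
 * Walk example for pfn_to_dma_pte() (illustrative, 4-level table / agaw 2):
 * for pfn 0x12345 the per-level indices pfn_level_offset(pfn, 4..1) are
 * 0, 0, 0x91 and 0x145.  With target_level == 1 the walk allocates any
 * missing intermediate tables and returns the 4KiB leaf PTE; target_level == 2
 * stops one level higher (the 2MiB slot, used for superpages); and
 * target_level == 0 stops at the first non-present or superpage PTE it finds
 * without allocating anything.
 */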
822
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100823
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700824/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100825static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
826 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100827 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700828{
829 struct dma_pte *parent, *pte = NULL;
830 int total = agaw_to_level(domain->agaw);
831 int offset;
832
833 parent = domain->pgd;
834 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100835 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700836 pte = &parent[offset];
837 if (level == total)
838 return pte;
839
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100840 if (!dma_pte_present(pte)) {
841 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700842 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100843 }
844
845 if (pte->val & DMA_PTE_LARGE_PAGE) {
846 *large_page = total;
847 return pte;
848 }
849
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000850 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700851 total--;
852 }
853 return NULL;
854}
855
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700856/* clear last level pte, a tlb flush should follow */
Allen Kay292827c2011-10-14 12:31:54 -0700857static int dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100858 unsigned long start_pfn,
859 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700860{
David Woodhouse04b18e62009-06-27 19:15:01 +0100861 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100862 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100863 struct dma_pte *first_pte, *pte;
Allen Kay292827c2011-10-14 12:31:54 -0700864 int order;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700865
David Woodhouse04b18e62009-06-27 19:15:01 +0100866 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
David Woodhouse595badf2009-06-27 22:09:11 +0100867 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700868 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100869
David Woodhouse04b18e62009-06-27 19:15:01 +0100870 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700871 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100872 large_page = 1;
873 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100874 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100875 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100876 continue;
877 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100878 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100879 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100880 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100881 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100882 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
883
David Woodhouse310a5ab2009-06-28 18:52:20 +0100884 domain_flush_cache(domain, first_pte,
885 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700886
887 } while (start_pfn && start_pfn <= last_pfn);
Allen Kay292827c2011-10-14 12:31:54 -0700888
889 order = (large_page - 1) * 9;
890 return order;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700891}
892
Alex Williamson3269ee02013-06-15 10:27:19 -0600893static void dma_pte_free_level(struct dmar_domain *domain, int level,
894 struct dma_pte *pte, unsigned long pfn,
895 unsigned long start_pfn, unsigned long last_pfn)
896{
897 pfn = max(start_pfn, pfn);
898 pte = &pte[pfn_level_offset(pfn, level)];
899
900 do {
901 unsigned long level_pfn;
902 struct dma_pte *level_pte;
903
904 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
905 goto next;
906
907 level_pfn = pfn & level_mask(level - 1);
908 level_pte = phys_to_virt(dma_pte_addr(pte));
909
910 if (level > 2)
911 dma_pte_free_level(domain, level - 1, level_pte,
912 level_pfn, start_pfn, last_pfn);
913
914 /* If range covers entire pagetable, free it */
915 if (!(start_pfn > level_pfn ||
916 last_pfn < level_pfn + level_size(level))) {
917 dma_clear_pte(pte);
918 domain_flush_cache(domain, pte, sizeof(*pte));
919 free_pgtable_page(level_pte);
920 }
921next:
922 pfn += level_size(level);
923 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
924}
925
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700926/* free page table pages. last level pte should already be cleared */
927static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100928 unsigned long start_pfn,
929 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700930{
David Woodhouse6660c632009-06-27 22:41:00 +0100931 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700932
David Woodhouse6660c632009-06-27 22:41:00 +0100933 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
934 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700935 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700936
David Woodhousef3a0a522009-06-30 03:40:07 +0100937 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -0600938 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
939 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +0100940
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700941 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100942 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700943 free_pgtable_page(domain->pgd);
944 domain->pgd = NULL;
945 }
946}
947
948/* iommu handling */
949static int iommu_alloc_root_entry(struct intel_iommu *iommu)
950{
951 struct root_entry *root;
952 unsigned long flags;
953
Suresh Siddha4c923d42009-10-02 11:01:24 -0700954 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700955 if (!root)
956 return -ENOMEM;
957
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700958 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700959
960 spin_lock_irqsave(&iommu->lock, flags);
961 iommu->root_entry = root;
962 spin_unlock_irqrestore(&iommu->lock, flags);
963
964 return 0;
965}
966
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700967static void iommu_set_root_entry(struct intel_iommu *iommu)
968{
969 void *addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100970 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700971 unsigned long flag;
972
973 addr = iommu->root_entry;
974
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200975 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700976 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
977
David Woodhousec416daa2009-05-10 20:30:58 +0100978 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700979
980 /* Make sure hardware complete it */
981 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100982 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700983
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200984 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700985}
986
987static void iommu_flush_write_buffer(struct intel_iommu *iommu)
988{
989 u32 val;
990 unsigned long flag;
991
David Woodhouse9af88142009-02-13 23:18:03 +0000992 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700993 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700994
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200995 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +0100996 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700997
998 /* Make sure hardware complete it */
999 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001000 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001001
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001002 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001003}
1004
 1005/* return value determines whether we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001006static void __iommu_flush_context(struct intel_iommu *iommu,
1007 u16 did, u16 source_id, u8 function_mask,
1008 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001009{
1010 u64 val = 0;
1011 unsigned long flag;
1012
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001013 switch (type) {
1014 case DMA_CCMD_GLOBAL_INVL:
1015 val = DMA_CCMD_GLOBAL_INVL;
1016 break;
1017 case DMA_CCMD_DOMAIN_INVL:
1018 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1019 break;
1020 case DMA_CCMD_DEVICE_INVL:
1021 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1022 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1023 break;
1024 default:
1025 BUG();
1026 }
1027 val |= DMA_CCMD_ICC;
1028
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001029 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001030 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1031
1032 /* Make sure hardware complete it */
1033 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1034 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1035
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001036 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001037}
1038
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001039/* return value determines whether we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001040static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1041 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001042{
1043 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1044 u64 val = 0, val_iva = 0;
1045 unsigned long flag;
1046
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001047 switch (type) {
1048 case DMA_TLB_GLOBAL_FLUSH:
 1049 /* global flush doesn't need to set IVA_REG */
1050 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1051 break;
1052 case DMA_TLB_DSI_FLUSH:
1053 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1054 break;
1055 case DMA_TLB_PSI_FLUSH:
1056 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1057 /* Note: always flush non-leaf currently */
1058 val_iva = size_order | addr;
1059 break;
1060 default:
1061 BUG();
1062 }
1063 /* Note: set drain read/write */
1064#if 0
1065 /*
 1066 * This is probably just to be extra safe.  It looks like we can
1067 * ignore it without any impact.
1068 */
1069 if (cap_read_drain(iommu->cap))
1070 val |= DMA_TLB_READ_DRAIN;
1071#endif
1072 if (cap_write_drain(iommu->cap))
1073 val |= DMA_TLB_WRITE_DRAIN;
1074
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001075 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001076 /* Note: Only uses first TLB reg currently */
1077 if (val_iva)
1078 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1079 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1080
1081 /* Make sure hardware complete it */
1082 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1083 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1084
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001085 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001086
1087 /* check IOTLB invalidation granularity */
1088 if (DMA_TLB_IAIG(val) == 0)
1089 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1090 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1091 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001092 (unsigned long long)DMA_TLB_IIRG(type),
1093 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001094}
1095
Yu Zhao93a23a72009-05-18 13:51:37 +08001096static struct device_domain_info *iommu_support_dev_iotlb(
1097 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001098{
Yu Zhao93a23a72009-05-18 13:51:37 +08001099 int found = 0;
1100 unsigned long flags;
1101 struct device_domain_info *info;
1102 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1103
1104 if (!ecap_dev_iotlb_support(iommu->ecap))
1105 return NULL;
1106
1107 if (!iommu->qi)
1108 return NULL;
1109
1110 spin_lock_irqsave(&device_domain_lock, flags);
1111 list_for_each_entry(info, &domain->devices, link)
1112 if (info->bus == bus && info->devfn == devfn) {
1113 found = 1;
1114 break;
1115 }
1116 spin_unlock_irqrestore(&device_domain_lock, flags);
1117
1118 if (!found || !info->dev)
1119 return NULL;
1120
1121 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1122 return NULL;
1123
1124 if (!dmar_find_matched_atsr_unit(info->dev))
1125 return NULL;
1126
1127 info->iommu = iommu;
1128
1129 return info;
1130}
1131
1132static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1133{
1134 if (!info)
1135 return;
1136
1137 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1138}
1139
1140static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1141{
1142 if (!info->dev || !pci_ats_enabled(info->dev))
1143 return;
1144
1145 pci_disable_ats(info->dev);
1146}
1147
1148static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1149 u64 addr, unsigned mask)
1150{
1151 u16 sid, qdep;
1152 unsigned long flags;
1153 struct device_domain_info *info;
1154
1155 spin_lock_irqsave(&device_domain_lock, flags);
1156 list_for_each_entry(info, &domain->devices, link) {
1157 if (!info->dev || !pci_ats_enabled(info->dev))
1158 continue;
1159
1160 sid = info->bus << 8 | info->devfn;
1161 qdep = pci_ats_queue_depth(info->dev);
1162 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1163 }
1164 spin_unlock_irqrestore(&device_domain_lock, flags);
1165}
1166
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001167static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
Nadav Amit82653632010-04-01 13:24:40 +03001168 unsigned long pfn, unsigned int pages, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001169{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001170 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001171 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001172
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001173 BUG_ON(pages == 0);
1174
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001175 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001176 * Fall back to domain selective flush if there is no PSI support or the size is
1177 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001178 * PSI requires page size to be 2 ^ x, and the base address is naturally
1179 * aligned to the size
1180 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001181 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1182 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001183 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001184 else
1185 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1186 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001187
1188 /*
Nadav Amit82653632010-04-01 13:24:40 +03001189 * In caching mode, changes of pages from non-present to present require
1190 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001191 */
Nadav Amit82653632010-04-01 13:24:40 +03001192 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001193 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001194}
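/*
 * Mask arithmetic example for iommu_flush_iotlb_psi() (illustrative): for a
 * request covering 9 pages, mask = ilog2(__roundup_pow_of_two(9)) = 4, so a
 * page-selective invalidation covers 2^4 = 16 naturally aligned pages.  If
 * the IOMMU lacks PSI support or mask exceeds cap_max_amask_val(), the code
 * above falls back to a domain-selective flush instead.
 */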
1195
mark grossf8bab732008-02-08 04:18:38 -08001196static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1197{
1198 u32 pmen;
1199 unsigned long flags;
1200
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001201 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001202 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1203 pmen &= ~DMA_PMEN_EPM;
1204 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1205
1206 /* wait for the protected region status bit to clear */
1207 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1208 readl, !(pmen & DMA_PMEN_PRS), pmen);
1209
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001210 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001211}
1212
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001213static int iommu_enable_translation(struct intel_iommu *iommu)
1214{
1215 u32 sts;
1216 unsigned long flags;
1217
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001218 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001219 iommu->gcmd |= DMA_GCMD_TE;
1220 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001221
1222 /* Make sure hardware complete it */
1223 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001224 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001225
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001226 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001227 return 0;
1228}
1229
1230static int iommu_disable_translation(struct intel_iommu *iommu)
1231{
1232 u32 sts;
1233 unsigned long flag;
1234
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001235 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001236 iommu->gcmd &= ~DMA_GCMD_TE;
1237 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1238
1239 /* Make sure hardware complete it */
1240 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001241 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001242
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001243 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001244 return 0;
1245}
1246
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001247
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001248static int iommu_init_domains(struct intel_iommu *iommu)
1249{
1250 unsigned long ndomains;
1251 unsigned long nlongs;
1252
1253 ndomains = cap_ndoms(iommu->cap);
Masanari Iida68aeb962012-01-25 00:25:52 +09001254 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
Yinghai Lu680a7522010-04-08 19:58:23 +01001255 ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001256 nlongs = BITS_TO_LONGS(ndomains);
1257
Donald Dutile94a91b52009-08-20 16:51:34 -04001258 spin_lock_init(&iommu->lock);
1259
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001260 /* TBD: there might be 64K domains,
1261 * consider other allocation for future chip
1262 */
1263 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1264 if (!iommu->domain_ids) {
1265 printk(KERN_ERR "Allocating domain id array failed\n");
1266 return -ENOMEM;
1267 }
1268 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1269 GFP_KERNEL);
1270 if (!iommu->domains) {
1271 printk(KERN_ERR "Allocating domain array failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272 return -ENOMEM;
1273 }
1274
1275 /*
1276 * if Caching mode is set, then invalid translations are tagged
1277 * with domainid 0. Hence we need to pre-allocate it.
1278 */
1279 if (cap_caching_mode(iommu->cap))
1280 set_bit(0, iommu->domain_ids);
1281 return 0;
1282}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001284
1285static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001286static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001287
1288void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001289{
1290 struct dmar_domain *domain;
1291 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001292 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001293
Donald Dutile94a91b52009-08-20 16:51:34 -04001294 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001295 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001296 domain = iommu->domains[i];
1297 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001298
Donald Dutile94a91b52009-08-20 16:51:34 -04001299 spin_lock_irqsave(&domain->iommu_lock, flags);
1300 if (--domain->iommu_count == 0) {
1301 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1302 vm_domain_exit(domain);
1303 else
1304 domain_exit(domain);
1305 }
1306 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001307 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001308 }
1309
1310 if (iommu->gcmd & DMA_GCMD_TE)
1311 iommu_disable_translation(iommu);
1312
1313 if (iommu->irq) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001314 irq_set_handler_data(iommu->irq, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001315 /* This will mask the irq */
1316 free_irq(iommu->irq, iommu);
1317 destroy_irq(iommu->irq);
1318 }
1319
1320 kfree(iommu->domains);
1321 kfree(iommu->domain_ids);
1322
Weidong Hand9630fe2008-12-08 11:06:32 +08001323 g_iommus[iommu->seq_id] = NULL;
1324
1325 /* if all iommus are freed, free g_iommus */
1326 for (i = 0; i < g_num_of_iommus; i++) {
1327 if (g_iommus[i])
1328 break;
1329 }
1330
1331 if (i == g_num_of_iommus)
1332 kfree(g_iommus);
1333
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001334 /* free context mapping */
1335 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001336}
1337
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001338static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001340 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001341
1342 domain = alloc_domain_mem();
1343 if (!domain)
1344 return NULL;
1345
Suresh Siddha4c923d42009-10-02 11:01:24 -07001346 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08001347 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001348 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349
1350 return domain;
1351}
1352
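/*
 * Claim a free domain id on @iommu for @domain: the id is marked in
 * iommu->domain_ids, the domain is recorded in iommu->domains[], and
 * the IOMMU is noted in domain->iommu_bmp, all under iommu->lock.
 * Fails with -ENOMEM when all cap_ndoms() ids are in use.
 */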
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001353static int iommu_attach_domain(struct dmar_domain *domain,
1354 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001355{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001356 int num;
1357 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001358 unsigned long flags;
1359
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001360 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001361
1362 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001363
1364 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1365 if (num >= ndomains) {
1366 spin_unlock_irqrestore(&iommu->lock, flags);
1367 printk(KERN_ERR "IOMMU: no free domain ids\n");
1368 return -ENOMEM;
1369 }
1370
1371 domain->id = num;
1372 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001373 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001374 iommu->domains[num] = domain;
1375 spin_unlock_irqrestore(&iommu->lock, flags);
1376
1377 return 0;
1378}
1379
1380static void iommu_detach_domain(struct dmar_domain *domain,
1381 struct intel_iommu *iommu)
1382{
1383 unsigned long flags;
1384 int num, ndomains;
1385 int found = 0;
1386
1387 spin_lock_irqsave(&iommu->lock, flags);
1388 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001389 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001390 if (iommu->domains[num] == domain) {
1391 found = 1;
1392 break;
1393 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001394 }
1395
1396 if (found) {
1397 clear_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001398 clear_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001399 iommu->domains[num] = NULL;
1400 }
Weidong Han8c11e792008-12-08 15:29:22 +08001401 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402}
1403
1404static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001405static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406
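/*
 * Reserve IOVA ranges that must never be handed out for DMA: the
 * IOAPIC MMIO window and every PCI MMIO BAR in the system, so that a
 * DMA address can never alias device MMIO (peer-to-peer) space.
 */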
Joseph Cihula51a63e62011-03-21 11:04:24 -07001407static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408{
1409 struct pci_dev *pdev = NULL;
1410 struct iova *iova;
1411 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412
David Millerf6611972008-02-06 01:36:23 -08001413 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001414
Mark Gross8a443df2008-03-04 14:59:31 -08001415 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1416 &reserved_rbtree_key);
1417
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418 /* IOAPIC ranges shouldn't be accessed by DMA */
1419 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1420 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001421 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001423 return -ENODEV;
1424 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001425
1426 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1427 for_each_pci_dev(pdev) {
1428 struct resource *r;
1429
1430 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1431 r = &pdev->resource[i];
1432 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1433 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001434 iova = reserve_iova(&reserved_iova_list,
1435 IOVA_PFN(r->start),
1436 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001437 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001438 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001439 return -ENODEV;
1440 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001441 }
1442 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001443 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444}
1445
1446static void domain_reserve_special_ranges(struct dmar_domain *domain)
1447{
1448 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1449}
1450
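/*
 * Round the guest address width up to the next page-table level
 * boundary: 12 bits of page offset plus a multiple of 9 bits per
 * level, capped at 64. E.g. a 40-bit guest width becomes 48 bits.
 */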
1451static inline int guestwidth_to_adjustwidth(int gaw)
1452{
1453 int agaw;
1454 int r = (gaw - 12) % 9;
1455
1456 if (r == 0)
1457 agaw = gaw;
1458 else
1459 agaw = gaw + 9 - r;
1460 if (agaw > 64)
1461 agaw = 64;
1462 return agaw;
1463}
1464
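/*
 * One-time initialization of a freshly attached domain: set up its
 * IOVA allocator and reserved ranges, pick an AGAW supported by the
 * hardware for the requested guest width, record coherency, snooping
 * and superpage capabilities, and allocate the top-level page directory.
 */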
1465static int domain_init(struct dmar_domain *domain, int guest_width)
1466{
1467 struct intel_iommu *iommu;
1468 int adjust_width, agaw;
1469 unsigned long sagaw;
1470
David Millerf6611972008-02-06 01:36:23 -08001471 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001472 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001473
1474 domain_reserve_special_ranges(domain);
1475
1476 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001477 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001478 if (guest_width > cap_mgaw(iommu->cap))
1479 guest_width = cap_mgaw(iommu->cap);
1480 domain->gaw = guest_width;
1481 adjust_width = guestwidth_to_adjustwidth(guest_width);
1482 agaw = width_to_agaw(adjust_width);
1483 sagaw = cap_sagaw(iommu->cap);
1484 if (!test_bit(agaw, &sagaw)) {
1485 /* hardware doesn't support it, choose a bigger one */
1486 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1487 agaw = find_next_bit(&sagaw, 5, agaw);
1488 if (agaw >= 5)
1489 return -ENODEV;
1490 }
1491 domain->agaw = agaw;
1492 INIT_LIST_HEAD(&domain->devices);
1493
Weidong Han8e6040972008-12-08 15:49:06 +08001494 if (ecap_coherent(iommu->ecap))
1495 domain->iommu_coherency = 1;
1496 else
1497 domain->iommu_coherency = 0;
1498
Sheng Yang58c610b2009-03-18 15:33:05 +08001499 if (ecap_sc_support(iommu->ecap))
1500 domain->iommu_snooping = 1;
1501 else
1502 domain->iommu_snooping = 0;
1503
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001504 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001505 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001506 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001507
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001508 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001509 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001510 if (!domain->pgd)
1511 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001512 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001513 return 0;
1514}
1515
1516static void domain_exit(struct dmar_domain *domain)
1517{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001518 struct dmar_drhd_unit *drhd;
1519 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001520
 1521	/* Domain 0 is reserved, so don't process it */
1522 if (!domain)
1523 return;
1524
Alex Williamson7b668352011-05-24 12:02:41 +01001525 /* Flush any lazy unmaps that may reference this domain */
1526 if (!intel_iommu_strict)
1527 flush_unmaps_timeout(0);
1528
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001529 domain_remove_dev_info(domain);
1530 /* destroy iovas */
1531 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001532
1533 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001534 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001535
1536 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001537 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001539 for_each_active_iommu(iommu, drhd)
Mike Travis1b198bb2012-03-05 15:05:16 -08001540 if (test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001541 iommu_detach_domain(domain, iommu);
1542
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001543 free_domain_mem(domain);
1544}
1545
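/*
 * Install the context entry for (@segment, @bus, @devfn): allocate a
 * per-IOMMU domain id for VM/static-identity domains, point the entry
 * at the domain's page tables (or program pass-through), and flush the
 * context cache and IOTLB as required when caching mode is set.
 */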
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001546static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1547 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001548{
1549 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001550 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001551 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001552 struct dma_pte *pgd;
1553 unsigned long num;
1554 unsigned long ndomains;
1555 int id;
1556 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001557 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558
1559 pr_debug("Set context mapping for %02x:%02x.%d\n",
1560 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001561
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001563 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1564 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001565
David Woodhouse276dbf992009-04-04 01:45:37 +01001566 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001567 if (!iommu)
1568 return -ENODEV;
1569
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001570 context = device_to_context_entry(iommu, bus, devfn);
1571 if (!context)
1572 return -ENOMEM;
1573 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001574 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001575 spin_unlock_irqrestore(&iommu->lock, flags);
1576 return 0;
1577 }
1578
Weidong Hanea6606b2008-12-08 23:08:15 +08001579 id = domain->id;
1580 pgd = domain->pgd;
1581
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001582 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1583 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001584 int found = 0;
1585
1586 /* find an available domain id for this device in iommu */
1587 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001588 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001589 if (iommu->domains[num] == domain) {
1590 id = num;
1591 found = 1;
1592 break;
1593 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001594 }
1595
1596 if (found == 0) {
1597 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1598 if (num >= ndomains) {
1599 spin_unlock_irqrestore(&iommu->lock, flags);
1600 printk(KERN_ERR "IOMMU: no free domain ids\n");
1601 return -EFAULT;
1602 }
1603
1604 set_bit(num, iommu->domain_ids);
1605 iommu->domains[num] = domain;
1606 id = num;
1607 }
1608
1609 /* Skip top levels of page tables for
 1610		 * iommus which have less agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001611 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001612 */
Chris Wright1672af12009-12-02 12:06:34 -08001613 if (translation != CONTEXT_TT_PASS_THROUGH) {
1614 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1615 pgd = phys_to_virt(dma_pte_addr(pgd));
1616 if (!dma_pte_present(pgd)) {
1617 spin_unlock_irqrestore(&iommu->lock, flags);
1618 return -ENOMEM;
1619 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001620 }
1621 }
1622 }
1623
1624 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001625
Yu Zhao93a23a72009-05-18 13:51:37 +08001626 if (translation != CONTEXT_TT_PASS_THROUGH) {
1627 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1628 translation = info ? CONTEXT_TT_DEV_IOTLB :
1629 CONTEXT_TT_MULTI_LEVEL;
1630 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001631 /*
1632 * In pass through mode, AW must be programmed to indicate the largest
1633 * AGAW value supported by hardware. And ASR is ignored by hardware.
1634 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001635 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001636 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001637 else {
1638 context_set_address_root(context, virt_to_phys(pgd));
1639 context_set_address_width(context, iommu->agaw);
1640 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001641
1642 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001643 context_set_fault_enable(context);
1644 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001645 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001647 /*
1648 * It's a non-present to present mapping. If hardware doesn't cache
 1649	 * non-present entries, we only need to flush the write-buffer. If it
 1650	 * _does_ cache non-present entries, then it does so in the special
1651 * domain #0, which we have to flush:
1652 */
1653 if (cap_caching_mode(iommu->cap)) {
1654 iommu->flush.flush_context(iommu, 0,
1655 (((u16)bus) << 8) | devfn,
1656 DMA_CCMD_MASK_NOBIT,
1657 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001658 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001659 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001661 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001662 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001663 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001664
1665 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001666 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001667 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001668 if (domain->iommu_count == 1)
1669 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001670 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001671 }
1672 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001673 return 0;
1674}
1675
1676static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001677domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1678 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001679{
1680 int ret;
1681 struct pci_dev *tmp, *parent;
1682
David Woodhouse276dbf992009-04-04 01:45:37 +01001683 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001684 pdev->bus->number, pdev->devfn,
1685 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686 if (ret)
1687 return ret;
1688
1689 /* dependent device mapping */
1690 tmp = pci_find_upstream_pcie_bridge(pdev);
1691 if (!tmp)
1692 return 0;
1693 /* Secondary interface's bus number and devfn 0 */
1694 parent = pdev->bus->self;
1695 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001696 ret = domain_context_mapping_one(domain,
1697 pci_domain_nr(parent->bus),
1698 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001699 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001700 if (ret)
1701 return ret;
1702 parent = parent->bus->self;
1703 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001704 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001706 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001707 tmp->subordinate->number, 0,
1708 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001709 else /* this is a legacy PCI bridge */
1710 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001711 pci_domain_nr(tmp->bus),
1712 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001713 tmp->devfn,
1714 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001715}
1716
Weidong Han5331fe62008-12-08 23:00:00 +08001717static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001718{
1719 int ret;
1720 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001721 struct intel_iommu *iommu;
1722
David Woodhouse276dbf992009-04-04 01:45:37 +01001723 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1724 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001725 if (!iommu)
1726 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727
David Woodhouse276dbf992009-04-04 01:45:37 +01001728 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729 if (!ret)
1730 return ret;
1731 /* dependent device mapping */
1732 tmp = pci_find_upstream_pcie_bridge(pdev);
1733 if (!tmp)
1734 return ret;
1735 /* Secondary interface's bus number and devfn 0 */
1736 parent = pdev->bus->self;
1737 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001738 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001739 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001740 if (!ret)
1741 return ret;
1742 parent = parent->bus->self;
1743 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001744 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001745 return device_context_mapped(iommu, tmp->subordinate->number,
1746 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001747 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001748 return device_context_mapped(iommu, tmp->bus->number,
1749 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001750}
1751
Fenghua Yuf5329592009-08-04 15:09:37 -07001752/* Returns a number of VTD pages, but aligned to MM page size */
1753static inline unsigned long aligned_nrpages(unsigned long host_addr,
1754 size_t size)
1755{
1756 host_addr &= ~PAGE_MASK;
1757 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1758}
1759
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001760/* Return largest possible superpage level for a given mapping */
1761static inline int hardware_largepage_caps(struct dmar_domain *domain,
1762 unsigned long iov_pfn,
1763 unsigned long phy_pfn,
1764 unsigned long pages)
1765{
1766 int support, level = 1;
1767 unsigned long pfnmerge;
1768
1769 support = domain->iommu_superpage;
1770
1771 /* To use a large page, the virtual *and* physical addresses
1772 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1773 of them will mean we have to use smaller pages. So just
1774 merge them and check both at once. */
1775 pfnmerge = iov_pfn | phy_pfn;
1776
1777 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1778 pages >>= VTD_STRIDE_SHIFT;
1779 if (!pages)
1780 break;
1781 pfnmerge >>= VTD_STRIDE_SHIFT;
1782 level++;
1783 support--;
1784 }
1785 return level;
1786}
1787
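/*
 * Core mapping routine: walk @nr_pages of IOVA space, taking physical
 * addresses either from the scatterlist or from @phys_pfn, pick the
 * largest superpage level the alignment allows, install PTEs with a
 * local cmpxchg (warning if a PTE is already set), and flush the CPU
 * cache once per page-table page written.
 */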
David Woodhouse9051aa02009-06-29 12:30:54 +01001788static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1789 struct scatterlist *sg, unsigned long phys_pfn,
1790 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001791{
1792 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001793 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001794 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001795 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001796 unsigned int largepage_lvl = 0;
1797 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001798
1799 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1800
1801 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1802 return -EINVAL;
1803
1804 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1805
David Woodhouse9051aa02009-06-29 12:30:54 +01001806 if (sg)
1807 sg_res = 0;
1808 else {
1809 sg_res = nr_pages + 1;
1810 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1811 }
1812
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001813 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001814 uint64_t tmp;
1815
David Woodhousee1605492009-06-29 11:17:38 +01001816 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001817 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001818 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1819 sg->dma_length = sg->length;
1820 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001821 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001822 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001823
David Woodhousee1605492009-06-29 11:17:38 +01001824 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001825 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1826
1827 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001828 if (!pte)
1829 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001830			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001831 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001832 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001833 /* Ensure that old small page tables are removed to make room
 1834				   for the superpage, if they exist. */
1835 dma_pte_clear_range(domain, iov_pfn,
1836 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1837 dma_pte_free_pagetable(domain, iov_pfn,
1838 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1839 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001840 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001841 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001842
David Woodhousee1605492009-06-29 11:17:38 +01001843 }
 1844		/* We don't need the lock here; nobody else
1845 * touches the iova range
1846 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001847 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001848 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001849 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001850 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1851 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001852 if (dumps) {
1853 dumps--;
1854 debug_dma_dump_mappings(NULL);
1855 }
1856 WARN_ON(1);
1857 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001858
1859 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1860
1861 BUG_ON(nr_pages < lvl_pages);
1862 BUG_ON(sg_res < lvl_pages);
1863
1864 nr_pages -= lvl_pages;
1865 iov_pfn += lvl_pages;
1866 phys_pfn += lvl_pages;
1867 pteval += lvl_pages * VTD_PAGE_SIZE;
1868 sg_res -= lvl_pages;
1869
1870 /* If the next PTE would be the first in a new page, then we
1871 need to flush the cache on the entries we've just written.
1872 And then we'll need to recalculate 'pte', so clear it and
1873 let it get set again in the if (!pte) block above.
1874
1875 If we're done (!nr_pages) we need to flush the cache too.
1876
1877 Also if we've been setting superpages, we may need to
1878 recalculate 'pte' and switch back to smaller pages for the
1879 end of the mapping, if the trailing size is not enough to
1880 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001881 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001882 if (!nr_pages || first_pte_in_page(pte) ||
1883 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001884 domain_flush_cache(domain, first_pte,
1885 (void *)pte - (void *)first_pte);
1886 pte = NULL;
1887 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001888
1889 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001890 sg = sg_next(sg);
1891 }
1892 return 0;
1893}
1894
David Woodhouse9051aa02009-06-29 12:30:54 +01001895static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1896 struct scatterlist *sg, unsigned long nr_pages,
1897 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001898{
David Woodhouse9051aa02009-06-29 12:30:54 +01001899 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1900}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001901
David Woodhouse9051aa02009-06-29 12:30:54 +01001902static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1903 unsigned long phys_pfn, unsigned long nr_pages,
1904 int prot)
1905{
1906 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001907}
1908
Weidong Hanc7151a82008-12-08 22:51:37 +08001909static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910{
Weidong Hanc7151a82008-12-08 22:51:37 +08001911 if (!iommu)
1912 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001913
1914 clear_context_table(iommu, bus, devfn);
1915 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001916 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001917 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001918}
1919
David Woodhouse109b9b02012-05-25 17:43:02 +01001920static inline void unlink_domain_info(struct device_domain_info *info)
1921{
1922 assert_spin_locked(&device_domain_lock);
1923 list_del(&info->link);
1924 list_del(&info->global);
1925 if (info->dev)
1926 info->dev->dev.archdata.iommu = NULL;
1927}
1928
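/*
 * Detach every device from @domain: unlink its device_domain_info,
 * disable the device IOTLB, clear its context entry on the owning
 * IOMMU and free the info structure.
 */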
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929static void domain_remove_dev_info(struct dmar_domain *domain)
1930{
1931 struct device_domain_info *info;
1932 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001933 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001934
1935 spin_lock_irqsave(&device_domain_lock, flags);
1936 while (!list_empty(&domain->devices)) {
1937 info = list_entry(domain->devices.next,
1938 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001939 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001940 spin_unlock_irqrestore(&device_domain_lock, flags);
1941
Yu Zhao93a23a72009-05-18 13:51:37 +08001942 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001943 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001944 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 free_devinfo_mem(info);
1946
1947 spin_lock_irqsave(&device_domain_lock, flags);
1948 }
1949 spin_unlock_irqrestore(&device_domain_lock, flags);
1950}
1951
1952/*
1953 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001954 * Note: we use struct pci_dev->dev.archdata.iommu to store the device_domain_info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001955 */
Kay, Allen M38717942008-09-09 18:37:29 +03001956static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001957find_domain(struct pci_dev *pdev)
1958{
1959 struct device_domain_info *info;
1960
1961 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001962 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001963 if (info)
1964 return info->domain;
1965 return NULL;
1966}
1967
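/*
 * Find or create the domain for @pdev. Devices behind a PCIe-to-PCI
 * bridge share the bridge's domain (they present the same source-id
 * downstream); otherwise a new domain is allocated, attached to the
 * matching DRHD's IOMMU and initialized. Racing callers are resolved
 * under device_domain_lock.
 */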
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968/* domain is initialized */
1969static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1970{
1971 struct dmar_domain *domain, *found = NULL;
1972 struct intel_iommu *iommu;
1973 struct dmar_drhd_unit *drhd;
1974 struct device_domain_info *info, *tmp;
1975 struct pci_dev *dev_tmp;
1976 unsigned long flags;
1977 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001978 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001979 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001980
1981 domain = find_domain(pdev);
1982 if (domain)
1983 return domain;
1984
David Woodhouse276dbf992009-04-04 01:45:37 +01001985 segment = pci_domain_nr(pdev->bus);
1986
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001987 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1988 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001989 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001990 bus = dev_tmp->subordinate->number;
1991 devfn = 0;
1992 } else {
1993 bus = dev_tmp->bus->number;
1994 devfn = dev_tmp->devfn;
1995 }
1996 spin_lock_irqsave(&device_domain_lock, flags);
1997 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001998 if (info->segment == segment &&
1999 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002000 found = info->domain;
2001 break;
2002 }
2003 }
2004 spin_unlock_irqrestore(&device_domain_lock, flags);
 2005		/* pcie-pci bridge already has a domain, use it */
2006 if (found) {
2007 domain = found;
2008 goto found_domain;
2009 }
2010 }
2011
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002012 domain = alloc_domain();
2013 if (!domain)
2014 goto error;
2015
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002016 /* Allocate new domain for the device */
2017 drhd = dmar_find_matched_drhd_unit(pdev);
2018 if (!drhd) {
2019 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2020 pci_name(pdev));
Julia Lawalld2900bd2012-07-24 16:18:14 +02002021 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002022 return NULL;
2023 }
2024 iommu = drhd->iommu;
2025
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002026 ret = iommu_attach_domain(domain, iommu);
2027 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002028 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002029 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002030 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002031
2032 if (domain_init(domain, gaw)) {
2033 domain_exit(domain);
2034 goto error;
2035 }
2036
2037 /* register pcie-to-pci device */
2038 if (dev_tmp) {
2039 info = alloc_devinfo_mem();
2040 if (!info) {
2041 domain_exit(domain);
2042 goto error;
2043 }
David Woodhouse276dbf992009-04-04 01:45:37 +01002044 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002045 info->bus = bus;
2046 info->devfn = devfn;
2047 info->dev = NULL;
2048 info->domain = domain;
 2049		/* This domain is shared by devices under the p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002050 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002051
 2052		/* pcie-to-pci bridge already has a domain, use it */
2053 found = NULL;
2054 spin_lock_irqsave(&device_domain_lock, flags);
2055 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002056 if (tmp->segment == segment &&
2057 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002058 found = tmp->domain;
2059 break;
2060 }
2061 }
2062 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002063 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002064 free_devinfo_mem(info);
2065 domain_exit(domain);
2066 domain = found;
2067 } else {
2068 list_add(&info->link, &domain->devices);
2069 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002070 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002071 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002072 }
2073
2074found_domain:
2075 info = alloc_devinfo_mem();
2076 if (!info)
2077 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002078 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002079 info->bus = pdev->bus->number;
2080 info->devfn = pdev->devfn;
2081 info->dev = pdev;
2082 info->domain = domain;
2083 spin_lock_irqsave(&device_domain_lock, flags);
 2084	/* somebody else raced us and attached this device first */
2085 found = find_domain(pdev);
2086 if (found != NULL) {
2087 spin_unlock_irqrestore(&device_domain_lock, flags);
2088 if (found != domain) {
2089 domain_exit(domain);
2090 domain = found;
2091 }
2092 free_devinfo_mem(info);
2093 return domain;
2094 }
2095 list_add(&info->link, &domain->devices);
2096 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002097 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002098 spin_unlock_irqrestore(&device_domain_lock, flags);
2099 return domain;
2100error:
 2101	/* recheck it here; another thread may have set it meanwhile */
2102 return find_domain(pdev);
2103}
2104
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002105static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002106#define IDENTMAP_ALL 1
2107#define IDENTMAP_GFX 2
2108#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002109
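/*
 * Identity-map the physical range [start, end] into @domain: reserve
 * the matching IOVA range, clear any PTEs left over from overlapping
 * mappings, then install a 1:1 read/write mapping.
 */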
David Woodhouseb2132032009-06-26 18:50:28 +01002110static int iommu_domain_identity_map(struct dmar_domain *domain,
2111 unsigned long long start,
2112 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113{
David Woodhousec5395d52009-06-28 16:35:56 +01002114 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2115 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002116
David Woodhousec5395d52009-06-28 16:35:56 +01002117 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2118 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002119 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002120 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002121 }
2122
David Woodhousec5395d52009-06-28 16:35:56 +01002123 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2124 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002125 /*
2126 * RMRR range might have overlap with physical memory range,
2127 * clear it first
2128 */
David Woodhousec5395d52009-06-28 16:35:56 +01002129 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002130
David Woodhousec5395d52009-06-28 16:35:56 +01002131 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2132 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002133 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002134}
2135
2136static int iommu_prepare_identity_map(struct pci_dev *pdev,
2137 unsigned long long start,
2138 unsigned long long end)
2139{
2140 struct dmar_domain *domain;
2141 int ret;
2142
David Woodhousec7ab48d2009-06-26 19:10:36 +01002143 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002144 if (!domain)
2145 return -ENOMEM;
2146
David Woodhouse19943b02009-08-04 16:19:20 +01002147 /* For _hardware_ passthrough, don't bother. But for software
2148 passthrough, we do it anyway -- it may indicate a memory
 2149	   range which is reserved in E820 and so didn't get set
 2150	   up in si_domain to start with */
2151 if (domain == si_domain && hw_pass_through) {
2152 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2153 pci_name(pdev), start, end);
2154 return 0;
2155 }
2156
2157 printk(KERN_INFO
2158 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2159 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002160
David Woodhouse5595b522009-12-02 09:21:55 +00002161 if (end < start) {
2162 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2163 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2164 dmi_get_system_info(DMI_BIOS_VENDOR),
2165 dmi_get_system_info(DMI_BIOS_VERSION),
2166 dmi_get_system_info(DMI_PRODUCT_VERSION));
2167 ret = -EIO;
2168 goto error;
2169 }
2170
David Woodhouse2ff729f2009-08-26 14:25:41 +01002171 if (end >> agaw_to_width(domain->agaw)) {
2172 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2173 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2174 agaw_to_width(domain->agaw),
2175 dmi_get_system_info(DMI_BIOS_VENDOR),
2176 dmi_get_system_info(DMI_BIOS_VERSION),
2177 dmi_get_system_info(DMI_PRODUCT_VERSION));
2178 ret = -EIO;
2179 goto error;
2180 }
David Woodhouse19943b02009-08-04 16:19:20 +01002181
David Woodhouseb2132032009-06-26 18:50:28 +01002182 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002183 if (ret)
2184 goto error;
2185
2186 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002187 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002188 if (ret)
2189 goto error;
2190
2191 return 0;
2192
2193 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002194 domain_exit(domain);
2195 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002196}
2197
2198static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2199 struct pci_dev *pdev)
2200{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002201 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002202 return 0;
2203 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002204 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002205}
2206
Suresh Siddhad3f13812011-08-23 17:05:25 -07002207#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002208static inline void iommu_prepare_isa(void)
2209{
2210 struct pci_dev *pdev;
2211 int ret;
2212
2213 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2214 if (!pdev)
2215 return;
2216
David Woodhousec7ab48d2009-06-26 19:10:36 +01002217 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002218 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002219
2220 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002221 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2222 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002223
2224}
2225#else
2226static inline void iommu_prepare_isa(void)
2227{
2228 return;
2229}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002230#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002231
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002232static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002233
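/*
 * Build the static identity (si) domain: attach it to every active
 * IOMMU, and, unless hardware pass-through is in use, 1:1 map every
 * online memory range so devices in this domain can DMA to any RAM.
 */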
Matt Kraai071e1372009-08-23 22:30:22 -07002234static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002235{
2236 struct dmar_drhd_unit *drhd;
2237 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002238 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002239
2240 si_domain = alloc_domain();
2241 if (!si_domain)
2242 return -EFAULT;
2243
David Woodhousec7ab48d2009-06-26 19:10:36 +01002244 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002245
2246 for_each_active_iommu(iommu, drhd) {
2247 ret = iommu_attach_domain(si_domain, iommu);
2248 if (ret) {
2249 domain_exit(si_domain);
2250 return -EFAULT;
2251 }
2252 }
2253
2254 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2255 domain_exit(si_domain);
2256 return -EFAULT;
2257 }
2258
2259 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2260
David Woodhouse19943b02009-08-04 16:19:20 +01002261 if (hw)
2262 return 0;
2263
David Woodhousec7ab48d2009-06-26 19:10:36 +01002264 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002265 unsigned long start_pfn, end_pfn;
2266 int i;
2267
2268 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2269 ret = iommu_domain_identity_map(si_domain,
2270 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2271 if (ret)
2272 return ret;
2273 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002274 }
2275
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002276 return 0;
2277}
2278
2279static void domain_remove_one_dev_info(struct dmar_domain *domain,
2280 struct pci_dev *pdev);
2281static int identity_mapping(struct pci_dev *pdev)
2282{
2283 struct device_domain_info *info;
2284
2285 if (likely(!iommu_identity_mapping))
2286 return 0;
2287
Mike Traviscb452a42011-05-28 13:15:03 -05002288 info = pdev->dev.archdata.iommu;
2289 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2290 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002291
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002292 return 0;
2293}
2294
2295static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002296 struct pci_dev *pdev,
2297 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002298{
2299 struct device_domain_info *info;
2300 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002301 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002302
2303 info = alloc_devinfo_mem();
2304 if (!info)
2305 return -ENOMEM;
2306
2307 info->segment = pci_domain_nr(pdev->bus);
2308 info->bus = pdev->bus->number;
2309 info->devfn = pdev->devfn;
2310 info->dev = pdev;
2311 info->domain = domain;
2312
2313 spin_lock_irqsave(&device_domain_lock, flags);
2314 list_add(&info->link, &domain->devices);
2315 list_add(&info->global, &device_domain_list);
2316 pdev->dev.archdata.iommu = info;
2317 spin_unlock_irqrestore(&device_domain_lock, flags);
2318
David Woodhousee2ad23d2012-05-25 17:42:54 +01002319 ret = domain_context_mapping(domain, pdev, translation);
2320 if (ret) {
2321 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002322 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002323 spin_unlock_irqrestore(&device_domain_lock, flags);
2324 free_devinfo_mem(info);
2325 return ret;
2326 }
2327
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002328 return 0;
2329}
2330
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002331static bool device_has_rmrr(struct pci_dev *dev)
2332{
2333 struct dmar_rmrr_unit *rmrr;
2334 int i;
2335
2336 for_each_rmrr_units(rmrr) {
2337 for (i = 0; i < rmrr->devices_cnt; i++) {
2338 /*
2339 * Return TRUE if this RMRR contains the device that
2340 * is passed in.
2341 */
2342 if (rmrr->devices[i] == dev)
2343 return true;
2344 }
2345 }
2346 return false;
2347}
2348
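/*
 * Decide whether @pdev may live in the identity (1:1) domain: devices
 * with RMRRs (other than USB) never do, Azalia and graphics devices
 * follow the IDENTMAP_* flags, conventional-PCI devices behind bridges
 * are excluded, and at run time the device's DMA mask must cover all
 * of memory.
 */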
David Woodhouse6941af22009-07-04 18:24:27 +01002349static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2350{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002351
2352 /*
2353 * We want to prevent any device associated with an RMRR from
2354 * getting placed into the SI Domain. This is done because
2355 * problems exist when devices are moved in and out of domains
2356 * and their respective RMRR info is lost. We exempt USB devices
2357 * from this process due to their usage of RMRRs that are known
2358 * to not be needed after BIOS hand-off to OS.
2359 */
2360 if (device_has_rmrr(pdev) &&
2361 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2362 return 0;
2363
David Woodhousee0fc7e02009-09-30 09:12:17 -07002364 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2365 return 1;
2366
2367 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2368 return 1;
2369
2370 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2371 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002372
David Woodhouse3dfc8132009-07-04 19:11:08 +01002373 /*
2374 * We want to start off with all devices in the 1:1 domain, and
2375 * take them out later if we find they can't access all of memory.
2376 *
2377 * However, we can't do this for PCI devices behind bridges,
2378 * because all PCI devices behind the same bridge will end up
2379 * with the same source-id on their transactions.
2380 *
2381 * Practically speaking, we can't change things around for these
2382 * devices at run-time, because we can't be sure there'll be no
2383 * DMA transactions in flight for any of their siblings.
2384 *
2385 * So PCI devices (unless they're on the root bus) as well as
2386 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2387 * the 1:1 domain, just in _case_ one of their siblings turns out
2388 * not to be able to map all of memory.
2389 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002390 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002391 if (!pci_is_root_bus(pdev->bus))
2392 return 0;
2393 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2394 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002395 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002396 return 0;
2397
2398 /*
2399 * At boot time, we don't yet know if devices will be 64-bit capable.
2400 * Assume that they will -- if they turn out not to be, then we can
2401 * take them out of the 1:1 domain later.
2402 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002403 if (!startup) {
2404 /*
2405 * If the device's dma_mask is less than the system's memory
2406 * size then this is not a candidate for identity mapping.
2407 */
2408 u64 dma_mask = pdev->dma_mask;
2409
2410 if (pdev->dev.coherent_dma_mask &&
2411 pdev->dev.coherent_dma_mask < dma_mask)
2412 dma_mask = pdev->dev.coherent_dma_mask;
2413
2414 return dma_mask >= dma_get_required_mask(&pdev->dev);
2415 }
David Woodhouse6941af22009-07-04 18:24:27 +01002416
2417 return 1;
2418}
2419
Matt Kraai071e1372009-08-23 22:30:22 -07002420static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002421{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002422 struct pci_dev *pdev = NULL;
2423 int ret;
2424
David Woodhouse19943b02009-08-04 16:19:20 +01002425 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002426 if (ret)
2427 return -EFAULT;
2428
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002429 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002430 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002431 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002432 hw ? CONTEXT_TT_PASS_THROUGH :
2433 CONTEXT_TT_MULTI_LEVEL);
2434 if (ret) {
2435 /* device not associated with an iommu */
2436 if (ret == -ENODEV)
2437 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002438 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002439 }
2440 pr_info("IOMMU: %s identity mapping for device %s\n",
2441 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002442 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002443 }
2444
2445 return 0;
2446}
2447
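/*
 * Main boot-time setup: count the IOMMUs, allocate per-IOMMU state and
 * root entries, choose register-based or queued invalidation, build
 * the static identity, RMRR and ISA mappings, then enable fault
 * reporting and DMA translation on every unit.
 */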
Joseph Cihulab7792602011-05-03 00:08:37 -07002448static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002449{
2450 struct dmar_drhd_unit *drhd;
2451 struct dmar_rmrr_unit *rmrr;
2452 struct pci_dev *pdev;
2453 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002454 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002455
2456 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002457 * for each drhd
2458 * allocate root
2459 * initialize and program root entry to not present
2460 * endfor
2461 */
2462 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002463 /*
 2464		 * No lock needed: this is only incremented in the single-
 2465		 * threaded kernel __init code path; all other accesses are
 2466		 * read-only.
2467 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002468 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2469 g_num_of_iommus++;
2470 continue;
2471 }
2472 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2473 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002474 }
2475
Weidong Hand9630fe2008-12-08 11:06:32 +08002476 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2477 GFP_KERNEL);
2478 if (!g_iommus) {
2479 printk(KERN_ERR "Allocating global iommu array failed\n");
2480 ret = -ENOMEM;
2481 goto error;
2482 }
2483
mark gross80b20dd2008-04-18 13:53:58 -07002484 deferred_flush = kzalloc(g_num_of_iommus *
2485 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2486 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002487 ret = -ENOMEM;
2488 goto error;
2489 }
2490
mark gross5e0d2a62008-03-04 15:22:08 -08002491 for_each_drhd_unit(drhd) {
2492 if (drhd->ignored)
2493 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002494
2495 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002496 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002497
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002498 ret = iommu_init_domains(iommu);
2499 if (ret)
2500 goto error;
2501
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002502 /*
2503 * TBD:
2504 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002505		 * among all IOMMUs; need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002506 */
2507 ret = iommu_alloc_root_entry(iommu);
2508 if (ret) {
2509 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2510 goto error;
2511 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002512 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002513 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002514 }
2515
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002516 /*
2517 * Start from the sane iommu hardware state.
2518 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002519 for_each_drhd_unit(drhd) {
2520 if (drhd->ignored)
2521 continue;
2522
2523 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002524
2525 /*
2526 * If the queued invalidation is already initialized by us
2527 * (for example, while enabling interrupt-remapping) then
2528 * we got the things already rolling from a sane state.
2529 */
2530 if (iommu->qi)
2531 continue;
2532
2533 /*
2534 * Clear any previous faults.
2535 */
2536 dmar_fault(-1, iommu);
2537 /*
2538 * Disable queued invalidation if supported and already enabled
2539 * before OS handover.
2540 */
2541 dmar_disable_qi(iommu);
2542 }
2543
2544 for_each_drhd_unit(drhd) {
2545 if (drhd->ignored)
2546 continue;
2547
2548 iommu = drhd->iommu;
2549
Youquan Songa77b67d2008-10-16 16:31:56 -07002550 if (dmar_enable_qi(iommu)) {
2551 /*
2552 * Queued Invalidate not enabled, use Register Based
2553 * Invalidate
2554 */
2555 iommu->flush.flush_context = __iommu_flush_context;
2556 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002557 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002558 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002559 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002560 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002561 } else {
2562 iommu->flush.flush_context = qi_flush_context;
2563 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002564 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002565 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002566 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002567 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002568 }
2569 }
2570
David Woodhouse19943b02009-08-04 16:19:20 +01002571 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002572 iommu_identity_mapping |= IDENTMAP_ALL;
2573
Suresh Siddhad3f13812011-08-23 17:05:25 -07002574#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002575 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002576#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002577
2578 check_tylersburg_isoch();
2579
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002580 /*
 2581	 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002582	 * identity mappings for rmrr, gfx, and isa, and fall back to the static
2583 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002584 */
David Woodhouse19943b02009-08-04 16:19:20 +01002585 if (iommu_identity_mapping) {
2586 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2587 if (ret) {
2588 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2589 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002590 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002591 }
David Woodhouse19943b02009-08-04 16:19:20 +01002592 /*
2593 * For each rmrr
2594 * for each dev attached to rmrr
2595 * do
2596 * locate drhd for dev, alloc domain for dev
2597 * allocate free domain
2598 * allocate page table entries for rmrr
2599 * if context not allocated for bus
2600 * allocate and init context
2601 * set present in root table for this bus
2602 * init context with domain, translation etc
2603 * endfor
2604 * endfor
2605 */
2606 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2607 for_each_rmrr_units(rmrr) {
2608 for (i = 0; i < rmrr->devices_cnt; i++) {
2609 pdev = rmrr->devices[i];
2610 /*
 2611			 * some BIOSes list non-existent devices in the
 2612			 * DMAR table.
2613 */
2614 if (!pdev)
2615 continue;
2616 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2617 if (ret)
2618 printk(KERN_ERR
2619 "IOMMU: mapping reserved region failed\n");
2620 }
2621 }
2622
2623 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002624
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002625 /*
2626 * for each drhd
2627 * enable fault log
2628 * global invalidate context cache
2629 * global invalidate iotlb
2630 * enable translation
2631 */
2632 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002633 if (drhd->ignored) {
2634 /*
2635 * we always have to disable PMRs or DMA may fail on
2636 * this device
2637 */
2638 if (force_on)
2639 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002640 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002641 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002643
2644 iommu_flush_write_buffer(iommu);
2645
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002646 ret = dmar_set_interrupt(iommu);
2647 if (ret)
2648 goto error;
2649
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002650 iommu_set_root_entry(iommu);
2651
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002652 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002653 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002654
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002655 ret = iommu_enable_translation(iommu);
2656 if (ret)
2657 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002658
2659 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002660 }
2661
2662 return 0;
2663error:
2664 for_each_drhd_unit(drhd) {
2665 if (drhd->ignored)
2666 continue;
2667 iommu = drhd->iommu;
2668 free_iommu(iommu);
2669 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002670 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002671 return ret;
2672}
2673
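/*
 * Allocate an IOVA range for @dev: unless forcedac is set, addresses
 * below 4GiB are tried first for devices with a >32-bit DMA mask, then
 * the allocation falls back to the full mask.
 */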
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002674/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002675static struct iova *intel_alloc_iova(struct device *dev,
2676 struct dmar_domain *domain,
2677 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002678{
2679 struct pci_dev *pdev = to_pci_dev(dev);
2680 struct iova *iova = NULL;
2681
David Woodhouse875764d2009-06-28 21:20:51 +01002682 /* Restrict dma_mask to the width that the iommu can handle */
2683 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2684
2685 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002686 /*
2687 * First try to allocate an I/O virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002688 * DMA_BIT_MASK(32) and, if that fails, try allocating
Joe Perches36098012007-12-17 11:40:11 -08002689 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002690 */
David Woodhouse875764d2009-06-28 21:20:51 +01002691 iova = alloc_iova(&domain->iovad, nrpages,
2692 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2693 if (iova)
2694 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002695 }
David Woodhouse875764d2009-06-28 21:20:51 +01002696 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2697 if (unlikely(!iova)) {
2698 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2699 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002700 return NULL;
2701 }
2702
2703 return iova;
2704}
2705
David Woodhouse147202a2009-07-07 19:43:20 +01002706static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002707{
2708 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002709 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002710
2711 domain = get_domain_for_dev(pdev,
2712 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2713 if (!domain) {
2714 printk(KERN_ERR
2715 "Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002716 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002717 }
2718
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002719 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002720 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002721 ret = domain_context_mapping(domain, pdev,
2722 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002723 if (ret) {
2724 printk(KERN_ERR
2725 "Domain context map for %s failed\n",
2726 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002727 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002728 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002729 }
2730
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002731 return domain;
2732}
2733
David Woodhouse147202a2009-07-07 19:43:20 +01002734static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2735{
2736 struct device_domain_info *info;
2737
2738 /* No lock here, assumes no domain exit in normal case */
2739 info = dev->dev.archdata.iommu;
2740 if (likely(info))
2741 return info->domain;
2742
2743 return __get_valid_domain_for_dev(dev);
2744}
2745
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002746static int iommu_dummy(struct pci_dev *pdev)
2747{
2748 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2749}
2750
2751/* Check if the pdev needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002752static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002753{
David Woodhouse73676832009-07-04 14:08:36 +01002754 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755 int found;
2756
David Woodhouse73676832009-07-04 14:08:36 +01002757 if (unlikely(dev->bus != &pci_bus_type))
2758 return 1;
2759
2760 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002761 if (iommu_dummy(pdev))
2762 return 1;
2763
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002764 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002765 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766
2767 found = identity_mapping(pdev);
2768 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002769 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002770 return 1;
2771 else {
2772 /*
2773 * The 32-bit DMA device is removed from si_domain and falls
2774 * back to non-identity mapping.
2775 */
2776 domain_remove_one_dev_info(si_domain, pdev);
2777 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2778 pci_name(pdev));
2779 return 0;
2780 }
2781 } else {
2782 /*
2783 * If a 64-bit DMA device has been detached from a VM, the device
2784 * is put into si_domain for identity mapping.
2785 */
David Woodhouse6941af22009-07-04 18:24:27 +01002786 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002787 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002788 ret = domain_add_dev_info(si_domain, pdev,
2789 hw_pass_through ?
2790 CONTEXT_TT_PASS_THROUGH :
2791 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002792 if (!ret) {
2793 printk(KERN_INFO "64bit %s uses identity mapping\n",
2794 pci_name(pdev));
2795 return 1;
2796 }
2797 }
2798 }
2799
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002800 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002801}
2802
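/*
 * Map a physically contiguous buffer into the device's IOVA space: allocate
 * an IOVA range, install page-table entries with the requested protection,
 * and flush the IOTLB (in caching mode) or the write buffer before returning
 * the DMA address.
 */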
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002803static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2804 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002805{
2806 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002807 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002808 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002809 struct iova *iova;
2810 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002811 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002812 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002813 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002814
2815 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002816
David Woodhouse73676832009-07-04 14:08:36 +01002817 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002818 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002819
2820 domain = get_valid_domain_for_dev(pdev);
2821 if (!domain)
2822 return 0;
2823
Weidong Han8c11e792008-12-08 15:29:22 +08002824 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002825 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002826
Mike Travisc681d0b2011-05-28 13:15:05 -05002827 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002828 if (!iova)
2829 goto error;
2830
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002831 /*
2832 * Check if DMAR supports zero-length reads on write only
2833 * mappings..
2834 */
2835 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002836 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002837 prot |= DMA_PTE_READ;
2838 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2839 prot |= DMA_PTE_WRITE;
2840 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002841 * paddr to (paddr + size) might be a partial page, so we map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002842 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002843 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002844 * is not a big problem.
2845 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002846 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002847 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002848 if (ret)
2849 goto error;
2850
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002851 /* it's a non-present to present mapping. Only flush if caching mode */
2852 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002853 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002854 else
Weidong Han8c11e792008-12-08 15:29:22 +08002855 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002856
David Woodhouse03d6a242009-06-28 15:33:46 +01002857 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2858 start_paddr += paddr & ~PAGE_MASK;
2859 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002860
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002861error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002862 if (iova)
2863 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002864 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002865 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002866 return 0;
2867}
2868
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002869static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2870 unsigned long offset, size_t size,
2871 enum dma_data_direction dir,
2872 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002873{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002874 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2875 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002876}
2877
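/*
 * Drain the deferred-unmap queue: for every IOMMU with pending entries,
 * invalidate the IOTLB (globally on real hardware, per-range in caching
 * mode) and free the queued IOVAs.  Called with async_umap_flush_lock held.
 */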
mark gross5e0d2a62008-03-04 15:22:08 -08002878static void flush_unmaps(void)
2879{
mark gross80b20dd2008-04-18 13:53:58 -07002880 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002881
mark gross5e0d2a62008-03-04 15:22:08 -08002882 timer_on = 0;
2883
2884 /* just flush them all */
2885 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002886 struct intel_iommu *iommu = g_iommus[i];
2887 if (!iommu)
2888 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002889
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002890 if (!deferred_flush[i].next)
2891 continue;
2892
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002893 /* In caching mode, global flushes make emulation expensive */
2894 if (!cap_caching_mode(iommu->cap))
2895 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002896 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002897 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002898 unsigned long mask;
2899 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002900 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002901
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002902 /* On real hardware multiple invalidations are expensive */
2903 if (cap_caching_mode(iommu->cap))
2904 iommu_flush_iotlb_psi(iommu, domain->id,
2905 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2906 else {
2907 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2908 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2909 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2910 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002911 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002912 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002913 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002914 }
2915
mark gross5e0d2a62008-03-04 15:22:08 -08002916 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002917}
2918
2919static void flush_unmaps_timeout(unsigned long data)
2920{
mark gross80b20dd2008-04-18 13:53:58 -07002921 unsigned long flags;
2922
2923 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002924 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002925 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002926}
2927
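/*
 * Queue an IOVA for deferred freeing instead of flushing the IOTLB right
 * away.  The queue is drained once it reaches HIGH_WATER_MARK or when the
 * 10ms unmap timer fires.
 */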
2928static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2929{
2930 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002931 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002932 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002933
2934 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002935 if (list_size == HIGH_WATER_MARK)
2936 flush_unmaps();
2937
Weidong Han8c11e792008-12-08 15:29:22 +08002938 iommu = domain_get_iommu(dom);
2939 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002940
mark gross80b20dd2008-04-18 13:53:58 -07002941 next = deferred_flush[iommu_id].next;
2942 deferred_flush[iommu_id].domain[next] = dom;
2943 deferred_flush[iommu_id].iova[next] = iova;
2944 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002945
2946 if (!timer_on) {
2947 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2948 timer_on = 1;
2949 }
2950 list_size++;
2951 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2952}
2953
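/*
 * Tear down the mapping behind a DMA handle: clear the PTEs, free the page
 * tables, then either flush the IOTLB synchronously (intel_iommu_strict) or
 * defer the IOVA release via add_unmap().
 */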
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002954static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2955 size_t size, enum dma_data_direction dir,
2956 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002957{
2958 struct pci_dev *pdev = to_pci_dev(dev);
2959 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002960 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002961 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002962 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002963
David Woodhouse73676832009-07-04 14:08:36 +01002964 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002966
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002968 BUG_ON(!domain);
2969
Weidong Han8c11e792008-12-08 15:29:22 +08002970 iommu = domain_get_iommu(domain);
2971
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002972 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002973 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2974 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002975 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002976
David Woodhoused794dc92009-06-28 00:27:49 +01002977 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2978 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002979
David Woodhoused794dc92009-06-28 00:27:49 +01002980 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2981 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002982
2983 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002984 dma_pte_clear_range(domain, start_pfn, last_pfn);
2985
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002986 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002987 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2988
mark gross5e0d2a62008-03-04 15:22:08 -08002989 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002990 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002991 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002992 /* free iova */
2993 __free_iova(&domain->iovad, iova);
2994 } else {
2995 add_unmap(domain, iova);
2996 /*
2997 * queue up the release of the unmap to save the 1/6th of the
2998 * CPU time used up by the iotlb flush operation...
2999 */
mark gross5e0d2a62008-03-04 15:22:08 -08003000 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003001}
3002
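/*
 * Allocate a zeroed coherent buffer and map it bidirectionally; GFP_DMA /
 * GFP_DMA32 are only used when the device bypasses the IOMMU and its
 * coherent mask requires them.
 */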
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003003static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003004 dma_addr_t *dma_handle, gfp_t flags,
3005 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003006{
3007 void *vaddr;
3008 int order;
3009
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003010 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003011 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003012
3013 if (!iommu_no_mapping(hwdev))
3014 flags &= ~(GFP_DMA | GFP_DMA32);
3015 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3016 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3017 flags |= GFP_DMA;
3018 else
3019 flags |= GFP_DMA32;
3020 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003021
3022 vaddr = (void *)__get_free_pages(flags, order);
3023 if (!vaddr)
3024 return NULL;
3025 memset(vaddr, 0, size);
3026
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003027 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3028 DMA_BIDIRECTIONAL,
3029 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003030 if (*dma_handle)
3031 return vaddr;
3032 free_pages((unsigned long)vaddr, order);
3033 return NULL;
3034}
3035
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003036static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003037 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003038{
3039 int order;
3040
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003041 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003042 order = get_order(size);
3043
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003044 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003045 free_pages((unsigned long)vaddr, order);
3046}
3047
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003048static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3049 int nelems, enum dma_data_direction dir,
3050 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003051{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003052 struct pci_dev *pdev = to_pci_dev(hwdev);
3053 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003054 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003055 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003056 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003057
David Woodhouse73676832009-07-04 14:08:36 +01003058 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003059 return;
3060
3061 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003062 BUG_ON(!domain);
3063
3064 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003065
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003066 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003067 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3068 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003069 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003070
David Woodhoused794dc92009-06-28 00:27:49 +01003071 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3072 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003073
3074 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003075 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003076
David Woodhoused794dc92009-06-28 00:27:49 +01003077 /* free page tables */
3078 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3079
David Woodhouseacea0012009-07-14 01:55:11 +01003080 if (intel_iommu_strict) {
3081 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003082 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003083 /* free iova */
3084 __free_iova(&domain->iovad, iova);
3085 } else {
3086 add_unmap(domain, iova);
3087 /*
3088 * queue up the release of the unmap to save the 1/6th of the
3089 * CPU time used up by the iotlb flush operation...
3090 */
3091 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092}
3093
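/*
 * Identity "mapping" of a scatterlist for devices that bypass the IOMMU:
 * each segment's DMA address is simply its physical address.
 */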
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003094static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003095 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096{
3097 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003098 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003099
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003100 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003101 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003102 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003103 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003104 }
3105 return nelems;
3106}
3107
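/*
 * Map a scatterlist: add up the pages of all segments, allocate one
 * contiguous IOVA range and map every segment into it, then flush the
 * IOTLB (caching mode) or the write buffer.
 */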
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003108static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3109 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003110{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003111 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003112 struct pci_dev *pdev = to_pci_dev(hwdev);
3113 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003114 size_t size = 0;
3115 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003116 struct iova *iova = NULL;
3117 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003118 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003119 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003120 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003121
3122 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003123 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003124 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003125
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003126 domain = get_valid_domain_for_dev(pdev);
3127 if (!domain)
3128 return 0;
3129
Weidong Han8c11e792008-12-08 15:29:22 +08003130 iommu = domain_get_iommu(domain);
3131
David Woodhouseb536d242009-06-28 14:49:31 +01003132 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003133 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003134
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003135 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3136 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003137 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003138 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003139 return 0;
3140 }
3141
3142 /*
3143 * Check if DMAR supports zero-length reads on write only
3144 * mappings..
3145 */
3146 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003147 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003148 prot |= DMA_PTE_READ;
3149 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3150 prot |= DMA_PTE_WRITE;
3151
David Woodhouseb536d242009-06-28 14:49:31 +01003152 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003153
Fenghua Yuf5329592009-08-04 15:09:37 -07003154 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003155 if (unlikely(ret)) {
3156 /* clear the page */
3157 dma_pte_clear_range(domain, start_vpfn,
3158 start_vpfn + size - 1);
3159 /* free page tables */
3160 dma_pte_free_pagetable(domain, start_vpfn,
3161 start_vpfn + size - 1);
3162 /* free iova */
3163 __free_iova(&domain->iovad, iova);
3164 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003165 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003166
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003167 /* it's a non-present to present mapping. Only flush if caching mode */
3168 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003169 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003170 else
Weidong Han8c11e792008-12-08 15:29:22 +08003171 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003172
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003173 return nelems;
3174}
3175
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003176static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3177{
3178 return !dma_addr;
3179}
3180
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003181struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003182 .alloc = intel_alloc_coherent,
3183 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003184 .map_sg = intel_map_sg,
3185 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003186 .map_page = intel_map_page,
3187 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003188 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003189};
3190
3191static inline int iommu_domain_cache_init(void)
3192{
3193 int ret = 0;
3194
3195 iommu_domain_cache = kmem_cache_create("iommu_domain",
3196 sizeof(struct dmar_domain),
3197 0,
3198 SLAB_HWCACHE_ALIGN,
3199
3200 NULL);
3201 if (!iommu_domain_cache) {
3202 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3203 ret = -ENOMEM;
3204 }
3205
3206 return ret;
3207}
3208
3209static inline int iommu_devinfo_cache_init(void)
3210{
3211 int ret = 0;
3212
3213 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3214 sizeof(struct device_domain_info),
3215 0,
3216 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003217 NULL);
3218 if (!iommu_devinfo_cache) {
3219 printk(KERN_ERR "Couldn't create devinfo cache\n");
3220 ret = -ENOMEM;
3221 }
3222
3223 return ret;
3224}
3225
3226static inline int iommu_iova_cache_init(void)
3227{
3228 int ret = 0;
3229
3230 iommu_iova_cache = kmem_cache_create("iommu_iova",
3231 sizeof(struct iova),
3232 0,
3233 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003234 NULL);
3235 if (!iommu_iova_cache) {
3236 printk(KERN_ERR "Couldn't create iova cache\n");
3237 ret = -ENOMEM;
3238 }
3239
3240 return ret;
3241}
3242
3243static int __init iommu_init_mempool(void)
3244{
3245 int ret;
3246 ret = iommu_iova_cache_init();
3247 if (ret)
3248 return ret;
3249
3250 ret = iommu_domain_cache_init();
3251 if (ret)
3252 goto domain_error;
3253
3254 ret = iommu_devinfo_cache_init();
3255 if (!ret)
3256 return ret;
3257
3258 kmem_cache_destroy(iommu_domain_cache);
3259domain_error:
3260 kmem_cache_destroy(iommu_iova_cache);
3261
3262 return -ENOMEM;
3263}
3264
3265static void __init iommu_exit_mempool(void)
3266{
3267 kmem_cache_destroy(iommu_devinfo_cache);
3268 kmem_cache_destroy(iommu_domain_cache);
3269 kmem_cache_destroy(iommu_iova_cache);
3270
3271}
3272
Dan Williams556ab452010-07-23 15:47:56 -07003273static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3274{
3275 struct dmar_drhd_unit *drhd;
3276 u32 vtbar;
3277 int rc;
3278
3279 /* We know that this device on this chipset has its own IOMMU.
3280 * If we find it under a different IOMMU, then the BIOS is lying
3281 * to us. Hope that the IOMMU for this device is actually
3282 * disabled, and it needs no translation...
3283 */
3284 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3285 if (rc) {
3286 /* "can't" happen */
3287 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3288 return;
3289 }
3290 vtbar &= 0xffff0000;
3291
3292 /* we know that this iommu should be at offset 0xa000 from vtbar */
3293 drhd = dmar_find_matched_drhd_unit(pdev);
3294 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3295 TAINT_FIRMWARE_WORKAROUND,
3296 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3297 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3298}
3299DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3300
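/*
 * Mark DRHD units that cover no PCI devices, or only graphics devices while
 * dmar_map_gfx is clear, as ignored so they are left out of DMA remapping
 * setup.
 */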
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003301static void __init init_no_remapping_devices(void)
3302{
3303 struct dmar_drhd_unit *drhd;
3304
3305 for_each_drhd_unit(drhd) {
3306 if (!drhd->include_all) {
3307 int i;
3308 for (i = 0; i < drhd->devices_cnt; i++)
3309 if (drhd->devices[i] != NULL)
3310 break;
3311 /* ignore DMAR unit if no pci devices exist */
3312 if (i == drhd->devices_cnt)
3313 drhd->ignored = 1;
3314 }
3315 }
3316
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003317 for_each_drhd_unit(drhd) {
3318 int i;
3319 if (drhd->ignored || drhd->include_all)
3320 continue;
3321
3322 for (i = 0; i < drhd->devices_cnt; i++)
3323 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003324 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003325 break;
3326
3327 if (i < drhd->devices_cnt)
3328 continue;
3329
David Woodhousec0771df2011-10-14 20:59:46 +01003330 /* This IOMMU has *only* gfx devices. Either bypass it or
3331 set the gfx_mapped flag, as appropriate */
3332 if (dmar_map_gfx) {
3333 intel_iommu_gfx_mapped = 1;
3334 } else {
3335 drhd->ignored = 1;
3336 for (i = 0; i < drhd->devices_cnt; i++) {
3337 if (!drhd->devices[i])
3338 continue;
3339 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3340 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003341 }
3342 }
3343}
3344
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003345#ifdef CONFIG_SUSPEND
3346static int init_iommu_hw(void)
3347{
3348 struct dmar_drhd_unit *drhd;
3349 struct intel_iommu *iommu = NULL;
3350
3351 for_each_active_iommu(iommu, drhd)
3352 if (iommu->qi)
3353 dmar_reenable_qi(iommu);
3354
Joseph Cihulab7792602011-05-03 00:08:37 -07003355 for_each_iommu(iommu, drhd) {
3356 if (drhd->ignored) {
3357 /*
3358 * we always have to disable PMRs or DMA may fail on
3359 * this device
3360 */
3361 if (force_on)
3362 iommu_disable_protect_mem_regions(iommu);
3363 continue;
3364 }
3365
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003366 iommu_flush_write_buffer(iommu);
3367
3368 iommu_set_root_entry(iommu);
3369
3370 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003371 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003372 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003373 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003374 if (iommu_enable_translation(iommu))
3375 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003376 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003377 }
3378
3379 return 0;
3380}
3381
3382static void iommu_flush_all(void)
3383{
3384 struct dmar_drhd_unit *drhd;
3385 struct intel_iommu *iommu;
3386
3387 for_each_active_iommu(iommu, drhd) {
3388 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003389 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003390 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003391 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003392 }
3393}
3394
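/*
 * Flush the context and IOTLB caches, disable translation and save the
 * fault-event registers of every active IOMMU so they can be restored on
 * resume.
 */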
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003395static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003396{
3397 struct dmar_drhd_unit *drhd;
3398 struct intel_iommu *iommu = NULL;
3399 unsigned long flag;
3400
3401 for_each_active_iommu(iommu, drhd) {
3402 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3403 GFP_ATOMIC);
3404 if (!iommu->iommu_state)
3405 goto nomem;
3406 }
3407
3408 iommu_flush_all();
3409
3410 for_each_active_iommu(iommu, drhd) {
3411 iommu_disable_translation(iommu);
3412
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003413 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003414
3415 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3416 readl(iommu->reg + DMAR_FECTL_REG);
3417 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3418 readl(iommu->reg + DMAR_FEDATA_REG);
3419 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3420 readl(iommu->reg + DMAR_FEADDR_REG);
3421 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3422 readl(iommu->reg + DMAR_FEUADDR_REG);
3423
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003424 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003425 }
3426 return 0;
3427
3428nomem:
3429 for_each_active_iommu(iommu, drhd)
3430 kfree(iommu->iommu_state);
3431
3432 return -ENOMEM;
3433}
3434
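/*
 * Re-initialize the IOMMU hardware on resume and restore the fault-event
 * registers saved by iommu_suspend().
 */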
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003435static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003436{
3437 struct dmar_drhd_unit *drhd;
3438 struct intel_iommu *iommu = NULL;
3439 unsigned long flag;
3440
3441 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003442 if (force_on)
3443 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3444 else
3445 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003446 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003447 }
3448
3449 for_each_active_iommu(iommu, drhd) {
3450
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003451 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003452
3453 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3454 iommu->reg + DMAR_FECTL_REG);
3455 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3456 iommu->reg + DMAR_FEDATA_REG);
3457 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3458 iommu->reg + DMAR_FEADDR_REG);
3459 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3460 iommu->reg + DMAR_FEUADDR_REG);
3461
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003462 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003463 }
3464
3465 for_each_active_iommu(iommu, drhd)
3466 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003467}
3468
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003469static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003470 .resume = iommu_resume,
3471 .suspend = iommu_suspend,
3472};
3473
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003474static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003475{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003476 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003477}
3478
3479#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003480static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003481#endif /* CONFIG_PM */
3482
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003483LIST_HEAD(dmar_rmrr_units);
3484
3485static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3486{
3487 list_add(&rmrr->list, &dmar_rmrr_units);
3488}
3489
3490
3491int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3492{
3493 struct acpi_dmar_reserved_memory *rmrr;
3494 struct dmar_rmrr_unit *rmrru;
3495
3496 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3497 if (!rmrru)
3498 return -ENOMEM;
3499
3500 rmrru->hdr = header;
3501 rmrr = (struct acpi_dmar_reserved_memory *)header;
3502 rmrru->base_address = rmrr->base_address;
3503 rmrru->end_address = rmrr->end_address;
3504
3505 dmar_register_rmrr_unit(rmrru);
3506 return 0;
3507}
3508
3509static int __init
3510rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3511{
3512 struct acpi_dmar_reserved_memory *rmrr;
3513 int ret;
3514
3515 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3516 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3517 ((void *)rmrr) + rmrr->header.length,
3518 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3519
3520 if (ret || (rmrru->devices_cnt == 0)) {
3521 list_del(&rmrru->list);
3522 kfree(rmrru);
3523 }
3524 return ret;
3525}
3526
3527static LIST_HEAD(dmar_atsr_units);
3528
3529int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3530{
3531 struct acpi_dmar_atsr *atsr;
3532 struct dmar_atsr_unit *atsru;
3533
3534 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3535 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3536 if (!atsru)
3537 return -ENOMEM;
3538
3539 atsru->hdr = hdr;
3540 atsru->include_all = atsr->flags & 0x1;
3541
3542 list_add(&atsru->list, &dmar_atsr_units);
3543
3544 return 0;
3545}
3546
3547static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3548{
3549 int rc;
3550 struct acpi_dmar_atsr *atsr;
3551
3552 if (atsru->include_all)
3553 return 0;
3554
3555 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3556 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3557 (void *)atsr + atsr->header.length,
3558 &atsru->devices_cnt, &atsru->devices,
3559 atsr->segment);
3560 if (rc || !atsru->devices_cnt) {
3561 list_del(&atsru->list);
3562 kfree(atsru);
3563 }
3564
3565 return rc;
3566}
3567
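/*
 * Walk up from @dev to its PCIe root port and return 1 if that root port is
 * listed in the ATSR unit for the device's segment (or the unit is
 * include_all), 0 otherwise.
 */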
3568int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3569{
3570 int i;
3571 struct pci_bus *bus;
3572 struct acpi_dmar_atsr *atsr;
3573 struct dmar_atsr_unit *atsru;
3574
3575 dev = pci_physfn(dev);
3576
3577 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3578 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3579 if (atsr->segment == pci_domain_nr(dev->bus))
3580 goto found;
3581 }
3582
3583 return 0;
3584
3585found:
3586 for (bus = dev->bus; bus; bus = bus->parent) {
3587 struct pci_dev *bridge = bus->self;
3588
3589 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003590 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003591 return 0;
3592
Yijing Wang62f87c02012-07-24 17:20:03 +08003593 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003594 for (i = 0; i < atsru->devices_cnt; i++)
3595 if (atsru->devices[i] == bridge)
3596 return 1;
3597 break;
3598 }
3599 }
3600
3601 if (atsru->include_all)
3602 return 1;
3603
3604 return 0;
3605}
3606
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003607int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003608{
3609 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3610 struct dmar_atsr_unit *atsr, *atsr_n;
3611 int ret = 0;
3612
3613 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3614 ret = rmrr_parse_dev(rmrr);
3615 if (ret)
3616 return ret;
3617 }
3618
3619 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3620 ret = atsr_parse_dev(atsr);
3621 if (ret)
3622 return ret;
3623 }
3624
3625 return ret;
3626}
3627
Fenghua Yu99dcade2009-11-11 07:23:06 -08003628/*
3629 * Here we only respond to a device being unbound from its driver.
3630 *
3631 * A newly added device is not attached to its DMAR domain here yet; that
3632 * will happen when mapping the device to an iova.
3633 */
3634static int device_notifier(struct notifier_block *nb,
3635 unsigned long action, void *data)
3636{
3637 struct device *dev = data;
3638 struct pci_dev *pdev = to_pci_dev(dev);
3639 struct dmar_domain *domain;
3640
David Woodhouse44cd6132009-12-02 10:18:30 +00003641 if (iommu_no_mapping(dev))
3642 return 0;
3643
Fenghua Yu99dcade2009-11-11 07:23:06 -08003644 domain = find_domain(pdev);
3645 if (!domain)
3646 return 0;
3647
Alex Williamsona97590e2011-03-04 14:52:16 -07003648 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003649 domain_remove_one_dev_info(domain, pdev);
3650
Alex Williamsona97590e2011-03-04 14:52:16 -07003651 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3652 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3653 list_empty(&domain->devices))
3654 domain_exit(domain);
3655 }
3656
Fenghua Yu99dcade2009-11-11 07:23:06 -08003657 return 0;
3658}
3659
3660static struct notifier_block device_nb = {
3661 .notifier_call = device_notifier,
3662};
3663
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003664int __init intel_iommu_init(void)
3665{
3666 int ret = 0;
Takao Indoh3a93c842013-04-23 17:35:03 +09003667 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003668
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003669 /* VT-d is required for a TXT/tboot launch, so enforce that */
3670 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003671
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003672 if (dmar_table_init()) {
3673 if (force_on)
3674 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003675 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003676 }
3677
Takao Indoh3a93c842013-04-23 17:35:03 +09003678 /*
3679 * Disable translation if already enabled prior to OS handover.
3680 */
3681 for_each_drhd_unit(drhd) {
3682 struct intel_iommu *iommu;
3683
3684 if (drhd->ignored)
3685 continue;
3686
3687 iommu = drhd->iommu;
3688 if (iommu->gcmd & DMA_GCMD_TE)
3689 iommu_disable_translation(iommu);
3690 }
3691
Suresh Siddhac2c72862011-08-23 17:05:19 -07003692 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003693 if (force_on)
3694 panic("tboot: Failed to initialize DMAR device scope\n");
3695 return -ENODEV;
3696 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003697
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003698 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003699 return -ENODEV;
3700
Joseph Cihula51a63e62011-03-21 11:04:24 -07003701 if (iommu_init_mempool()) {
3702 if (force_on)
3703 panic("tboot: Failed to initialize iommu memory\n");
3704 return -ENODEV;
3705 }
3706
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003707 if (list_empty(&dmar_rmrr_units))
3708 printk(KERN_INFO "DMAR: No RMRR found\n");
3709
3710 if (list_empty(&dmar_atsr_units))
3711 printk(KERN_INFO "DMAR: No ATSR found\n");
3712
Joseph Cihula51a63e62011-03-21 11:04:24 -07003713 if (dmar_init_reserved_ranges()) {
3714 if (force_on)
3715 panic("tboot: Failed to reserve iommu ranges\n");
3716 return -ENODEV;
3717 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003718
3719 init_no_remapping_devices();
3720
Joseph Cihulab7792602011-05-03 00:08:37 -07003721 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003722 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003723 if (force_on)
3724 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003725 printk(KERN_ERR "IOMMU: dmar init failed\n");
3726 put_iova_domain(&reserved_iova_list);
3727 iommu_exit_mempool();
3728 return ret;
3729 }
3730 printk(KERN_INFO
3731 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3732
mark gross5e0d2a62008-03-04 15:22:08 -08003733 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003734#ifdef CONFIG_SWIOTLB
3735 swiotlb = 0;
3736#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003737 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003738
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003739 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003740
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003741 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003742
Fenghua Yu99dcade2009-11-11 07:23:06 -08003743 bus_register_notifier(&pci_bus_type, &device_nb);
3744
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003745 intel_iommu_enabled = 1;
3746
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003747 return 0;
3748}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003749
Han, Weidong3199aa62009-02-26 17:31:12 +08003750static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3751 struct pci_dev *pdev)
3752{
3753 struct pci_dev *tmp, *parent;
3754
3755 if (!iommu || !pdev)
3756 return;
3757
3758 /* dependent device detach */
3759 tmp = pci_find_upstream_pcie_bridge(pdev);
3760 /* Secondary interface's bus number and devfn 0 */
3761 if (tmp) {
3762 parent = pdev->bus->self;
3763 while (parent != tmp) {
3764 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003765 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003766 parent = parent->bus->self;
3767 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003768 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003769 iommu_detach_dev(iommu,
3770 tmp->subordinate->number, 0);
3771 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003772 iommu_detach_dev(iommu, tmp->bus->number,
3773 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003774 }
3775}
3776
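/*
 * Detach one PCI device from @domain: remove its device_domain_info, tear
 * down its context entry (and those of upstream bridges), and if no other
 * device behind the same IOMMU remains in the domain, clear that IOMMU from
 * the domain and, for ordinary DMA domains, release the domain id on it.
 */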
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003777static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003778 struct pci_dev *pdev)
3779{
3780 struct device_domain_info *info;
3781 struct intel_iommu *iommu;
3782 unsigned long flags;
3783 int found = 0;
3784 struct list_head *entry, *tmp;
3785
David Woodhouse276dbf992009-04-04 01:45:37 +01003786 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3787 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003788 if (!iommu)
3789 return;
3790
3791 spin_lock_irqsave(&device_domain_lock, flags);
3792 list_for_each_safe(entry, tmp, &domain->devices) {
3793 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003794 if (info->segment == pci_domain_nr(pdev->bus) &&
3795 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003796 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003797 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003798 spin_unlock_irqrestore(&device_domain_lock, flags);
3799
Yu Zhao93a23a72009-05-18 13:51:37 +08003800 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003801 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003802 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003803 free_devinfo_mem(info);
3804
3805 spin_lock_irqsave(&device_domain_lock, flags);
3806
3807 if (found)
3808 break;
3809 else
3810 continue;
3811 }
3812
3813 /* if there are no other devices under the same iommu
3814 * owned by this domain, clear this iommu in iommu_bmp,
3815 * update iommu count and coherency
3816 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003817 if (iommu == device_to_iommu(info->segment, info->bus,
3818 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003819 found = 1;
3820 }
3821
Roland Dreier3e7abe22011-07-20 06:22:21 -07003822 spin_unlock_irqrestore(&device_domain_lock, flags);
3823
Weidong Hanc7151a82008-12-08 22:51:37 +08003824 if (found == 0) {
3825 unsigned long tmp_flags;
3826 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003827 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003828 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003829 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003830 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003831
Alex Williamson9b4554b2011-05-24 12:19:04 -04003832 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3833 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3834 spin_lock_irqsave(&iommu->lock, tmp_flags);
3835 clear_bit(domain->id, iommu->domain_ids);
3836 iommu->domains[domain->id] = NULL;
3837 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3838 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003839 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003840}
3841
3842static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3843{
3844 struct device_domain_info *info;
3845 struct intel_iommu *iommu;
3846 unsigned long flags1, flags2;
3847
3848 spin_lock_irqsave(&device_domain_lock, flags1);
3849 while (!list_empty(&domain->devices)) {
3850 info = list_entry(domain->devices.next,
3851 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01003852 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003853 spin_unlock_irqrestore(&device_domain_lock, flags1);
3854
Yu Zhao93a23a72009-05-18 13:51:37 +08003855 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003856 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003857 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003858 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003859
3860 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003861 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003862 */
3863 spin_lock_irqsave(&domain->iommu_lock, flags2);
3864 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003865 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003866 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003867 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003868 }
3869 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3870
3871 free_devinfo_mem(info);
3872 spin_lock_irqsave(&device_domain_lock, flags1);
3873 }
3874 spin_unlock_irqrestore(&device_domain_lock, flags1);
3875}
3876
Weidong Han5e98c4b2008-12-08 23:03:27 +08003877/* domain id for virtual machine, it won't be set in context */
3878static unsigned long vm_domid;
3879
3880static struct dmar_domain *iommu_alloc_vm_domain(void)
3881{
3882 struct dmar_domain *domain;
3883
3884 domain = alloc_domain_mem();
3885 if (!domain)
3886 return NULL;
3887
3888 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003889 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003890 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003891 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3892
3893 return domain;
3894}
3895
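/*
 * Initialize a machine-created (VM) domain: set up its iova space, reserve
 * the special ranges, compute the address width and allocate the top-level
 * page table.
 */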
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003896static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003897{
3898 int adjust_width;
3899
3900 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003901 spin_lock_init(&domain->iommu_lock);
3902
3903 domain_reserve_special_ranges(domain);
3904
3905 /* calculate AGAW */
3906 domain->gaw = guest_width;
3907 adjust_width = guestwidth_to_adjustwidth(guest_width);
3908 domain->agaw = width_to_agaw(adjust_width);
3909
3910 INIT_LIST_HEAD(&domain->devices);
3911
3912 domain->iommu_count = 0;
3913 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003914 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003915 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003916 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003917 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003918
3919 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003920 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003921 if (!domain->pgd)
3922 return -ENOMEM;
3923 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3924 return 0;
3925}
3926
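/*
 * Release every domain id this VM domain occupies on each hardware IOMMU.
 */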
3927static void iommu_free_vm_domain(struct dmar_domain *domain)
3928{
3929 unsigned long flags;
3930 struct dmar_drhd_unit *drhd;
3931 struct intel_iommu *iommu;
3932 unsigned long i;
3933 unsigned long ndomains;
3934
3935 for_each_drhd_unit(drhd) {
3936 if (drhd->ignored)
3937 continue;
3938 iommu = drhd->iommu;
3939
3940 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003941 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003942 if (iommu->domains[i] == domain) {
3943 spin_lock_irqsave(&iommu->lock, flags);
3944 clear_bit(i, iommu->domain_ids);
3945 iommu->domains[i] = NULL;
3946 spin_unlock_irqrestore(&iommu->lock, flags);
3947 break;
3948 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003949 }
3950 }
3951}
3952
3953static void vm_domain_exit(struct dmar_domain *domain)
3954{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003955 /* Domain 0 is reserved, so don't process it */
3956 if (!domain)
3957 return;
3958
3959 vm_domain_remove_all_dev_info(domain);
3960 /* destroy iovas */
3961 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003962
3963 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003964 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003965
3966 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003967 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003968
3969 iommu_free_vm_domain(domain);
3970 free_domain_mem(domain);
3971}
3972
Joerg Roedel5d450802008-12-03 14:52:32 +01003973static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003974{
Joerg Roedel5d450802008-12-03 14:52:32 +01003975 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003976
Joerg Roedel5d450802008-12-03 14:52:32 +01003977 dmar_domain = iommu_alloc_vm_domain();
3978 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003979 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003980 "intel_iommu_domain_init: dmar_domain == NULL\n");
3981 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003982 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003983 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003984 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003985 "intel_iommu_domain_init() failed\n");
3986 vm_domain_exit(dmar_domain);
3987 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003988 }
Allen Kay8140a952011-10-14 12:32:17 -07003989 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003990 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003991
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003992 domain->geometry.aperture_start = 0;
3993 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3994 domain->geometry.force_aperture = true;
3995
Joerg Roedel5d450802008-12-03 14:52:32 +01003996 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003997}
Kay, Allen M38717942008-09-09 18:37:29 +03003998
Joerg Roedel5d450802008-12-03 14:52:32 +01003999static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004000{
Joerg Roedel5d450802008-12-03 14:52:32 +01004001 struct dmar_domain *dmar_domain = domain->priv;
4002
4003 domain->priv = NULL;
4004 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004005}
Kay, Allen M38717942008-09-09 18:37:29 +03004006
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

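/*
 * iommu_ops->map: translate the generic IOMMU_READ/WRITE/CACHE protection
 * flags into VT-d PTE bits (DMA_PTE_READ/WRITE/SNP), grow the domain's
 * max_addr bookkeeping if needed, and install the pfn mappings.
 */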
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

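/*
 * iommu_ops->unmap: clear the PTEs covering [iova, iova + size) and report
 * how much was actually torn down (PAGE_SIZE << order), so the generic
 * iommu_unmap() loop can advance by the size that was really unmapped.
 */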
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

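/*
 * iommu_ops->iova_to_phys: walk the domain's page table for the given IOVA
 * and return the physical address recorded in the PTE (0 if no mapping
 * exists).
 */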
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

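/*
 * Report per-domain capabilities to the generic layer: cache-coherent
 * (snooped) mappings and whether interrupt remapping is enabled.
 */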
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

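/*
 * Work out which iommu_group a newly discovered device belongs to.  The
 * group is keyed on the device that actually issues DMA on its behalf:
 * conventional PCI devices behind a PCIe-to-PCI bridge take the bridge's
 * DMA alias, quirked devices take their DMA source, and multifunction or
 * bridged devices without full ACS isolation are folded into the group of
 * the topmost device that can still interfere with them.
 */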
static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add it to the same group as the lowest
	 * numbered function that also does not support the required
	 * ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

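/*
 * PCI fixups for chipsets with broken or incomplete DMAR support for the
 * integrated graphics device: either disable IOMMU translation for
 * graphics entirely or force more conservative flushing behaviour.
 */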
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

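/*
 * Judging by the field names above, GGC describes how much GTT stolen
 * memory the BIOS set aside and whether a VT (shadow GTT) variant was
 * chosen.  The quirk below reads it and, if no VT-enabled allocation was
 * made, gives up on DMA remapping for the graphics device; otherwise it
 * only disables batched IOTLB flushing.
 */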
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}