/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

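/*
 * Worked example (illustrative): with the default 48-bit guest address
 * width and 4KiB VT-d pages (VTD_PAGE_SHIFT == 12),
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, i.e. a domain can map up to
 * 2^36 pages (256TiB of IOVA space). On 32-bit kernels the min_t() in
 * DOMAIN_MAX_PFN clamps this to ULONG_MAX so PFNs still fit in an
 * unsigned long.
 */
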
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was an order of a 4KiB page and that the
 * mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

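/*
 * Example: ~0xFFFUL has bit 12 and every higher bit set, so this
 * advertises 4KiB, 8KiB, 16KiB, ... - every power-of-two size of at
 * least 4KiB - matching the "all orders of 4KiB" behavior described
 * above.
 */
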
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

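/*
 * Example of the agaw/width arithmetic above: agaw 1 is a 3-level table
 * covering 39 bits, agaw 2 is a 4-level table covering 48 bits
 * (30 + 2 * 9), and width_to_agaw(48) maps back to 2.
 */
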
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

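/*
 * Illustration: with LEVEL_STRIDE == 9, a level-2 entry spans
 * level_size(2) == 512 pages (2MiB with 4KiB pages), and
 * align_to_level(pfn, 2) rounds pfn up to the next multiple of 512.
 * pfn_level_offset() picks the 9-bit slice of the pfn that indexes the
 * page table at the given level.
 */
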
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

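/*
 * Example: with 4KiB kernel pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12
 * and the conversions above are no-ops; if PAGE_SHIFT were 16 (64KiB
 * kernel pages), one mm pfn would correspond to 16 DMA pfns.
 */
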
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * Set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT).
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

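/*
 * Sizing note (derived from the definitions above): a root entry is 16
 * bytes, so ROOT_ENTRY_NR == 4096 / 16 == 256 with 4KiB pages - one
 * root entry per PCI bus number. Each present entry points to a
 * context table indexed by devfn (see device_to_context_entry()
 * below).
 */
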
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

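/*
 * Putting the helpers above together (illustrative): a context entry
 * for domain 5 with a 4-level page table would have bit 0 of 'lo' set
 * (present), the page-table root in bits 12-63 of 'lo', address width
 * 2 (the agaw for 48 bits) in bits 0-2 of 'hi', and domain id 5 in
 * bits 8-23 of 'hi'.
 */
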
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

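/*
 * Example PTE (illustrative): a read/write 4KiB mapping of host
 * physical page 0x1234 is pte->val == ((u64)0x1234 << VTD_PAGE_SHIFT)
 * | DMA_PTE_READ | DMA_PTE_WRITE; setting bit 7 as well at a non-final
 * level instead marks a superpage leaf.
 */
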
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

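/*
 * Example kernel command line accepted by the parser above (options
 * are comma-separated):
 *
 *	intel_iommu=on,strict,sp_off
 *
 * which enables the IOMMU, disables batched IOTLB flushing, and
 * disables superpage support.
 */
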
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

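/*
 * Example: if cap_sagaw() reports only bit 2 set (4-level, 48-bit
 * tables), both helpers above return agaw 2; hardware that also sets
 * bit 3 would make iommu_calculate_max_sagaw() return 3 (5-level,
 * 57-bit) while iommu_calculate_agaw() still returns 2 for the 48-bit
 * default width.
 */
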
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

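/*
 * Example: if every active IOMMU reports 2MiB and 1GiB superpage
 * support, cap_super_page_val() yields 0x3 for each, the mask stays
 * 0x3 and fls(3) == 2, i.e. up to 1GiB pages; if any unit reports 0,
 * the loop exits early and superpages are disabled for the domain.
 */
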
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

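/*
 * Walk example (illustrative): in a 4-level domain (agaw 2),
 * pfn_to_dma_pte(domain, pfn, 1) indexes the tables with the 9-bit
 * slices pfn[35:27], pfn[26:18] and pfn[17:9], allocating missing
 * intermediate tables with cmpxchg64() to race safely against
 * concurrent mappers, and returns the leaf PTE selected by pfn[8:0].
 * target_level == 0 means "stop at whatever leaf or superpage is
 * already there".
 */
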
/* return the address's pte at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level))) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

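/*
 * Mask example: for pages == 3, __roundup_pow_of_two(3) == 4 and
 * mask == ilog2(4) == 2, so the hardware is asked to invalidate a
 * naturally aligned 4-page (2^2) region covering the request.
 */
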
mark grossf8bab732008-02-08 04:18:38 -08001200static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1201{
1202 u32 pmen;
1203 unsigned long flags;
1204
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001205 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001206 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1207 pmen &= ~DMA_PMEN_EPM;
1208 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1209
1210 /* wait for the protected region status bit to clear */
1211 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1212 readl, !(pmen & DMA_PMEN_PRS), pmen);
1213
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001214 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001215}
1216
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001217static int iommu_enable_translation(struct intel_iommu *iommu)
1218{
1219 u32 sts;
1220 unsigned long flags;
1221
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001222 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001223 iommu->gcmd |= DMA_GCMD_TE;
1224 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001225
1226 /* Make sure hardware complete it */
1227 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001228 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001229
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001230 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001231 return 0;
1232}
1233
1234static int iommu_disable_translation(struct intel_iommu *iommu)
1235{
1236 u32 sts;
1237 unsigned long flag;
1238
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001239 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001240 iommu->gcmd &= ~DMA_GCMD_TE;
1241 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1242
1243 /* Make sure hardware complete it */
1244 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001245 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001246
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001247 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001248 return 0;
1249}
1250
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001251
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001252static int iommu_init_domains(struct intel_iommu *iommu)
1253{
1254 unsigned long ndomains;
1255 unsigned long nlongs;
1256
1257 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001258 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1259 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001260 nlongs = BITS_TO_LONGS(ndomains);
1261
Donald Dutile94a91b52009-08-20 16:51:34 -04001262 spin_lock_init(&iommu->lock);
1263
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264 /* TBD: there might be 64K domains,
1265 * consider other allocation for future chip
1266 */
1267 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1268 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001269 pr_err("IOMMU%d: allocating domain id array failed\n",
1270 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001271 return -ENOMEM;
1272 }
1273 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1274 GFP_KERNEL);
1275 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001276 pr_err("IOMMU%d: allocating domain array failed\n",
1277 iommu->seq_id);
1278 kfree(iommu->domain_ids);
1279 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001280 return -ENOMEM;
1281 }
1282
1283 /*
1284 * if Caching mode is set, then invalid translations are tagged
1285 * with domainid 0. Hence we need to pre-allocate it.
1286 */
1287 if (cap_caching_mode(iommu->cap))
1288 set_bit(0, iommu->domain_ids);
1289 return 0;
1290}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001291
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001292
1293static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001294static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001295
1296void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001297{
1298 struct dmar_domain *domain;
1299 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001300 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001301
Donald Dutile94a91b52009-08-20 16:51:34 -04001302 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001303 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Donald Dutile94a91b52009-08-20 16:51:34 -04001304 domain = iommu->domains[i];
1305 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001306
Donald Dutile94a91b52009-08-20 16:51:34 -04001307 spin_lock_irqsave(&domain->iommu_lock, flags);
1308 if (--domain->iommu_count == 0) {
1309 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1310 vm_domain_exit(domain);
1311 else
1312 domain_exit(domain);
1313 }
1314 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001315 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001316 }
1317
1318 if (iommu->gcmd & DMA_GCMD_TE)
1319 iommu_disable_translation(iommu);
1320
1321 if (iommu->irq) {
Thomas Gleixnerdced35a2011-03-28 17:49:12 +02001322 irq_set_handler_data(iommu->irq, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001323 /* This will mask the irq */
1324 free_irq(iommu->irq, iommu);
1325 destroy_irq(iommu->irq);
1326 }
1327
1328 kfree(iommu->domains);
1329 kfree(iommu->domain_ids);
1330
Weidong Hand9630fe2008-12-08 11:06:32 +08001331 g_iommus[iommu->seq_id] = NULL;
1332
1333 /* if all iommus are freed, free g_iommus */
1334 for (i = 0; i < g_num_of_iommus; i++) {
1335 if (g_iommus[i])
1336 break;
1337 }
1338
1339 if (i == g_num_of_iommus)
1340 kfree(g_iommus);
1341
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001342 /* free context mapping */
1343 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344}
1345
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

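/*
 * Bind a domain to a specific hardware unit: pick the first free domain
 * id on that IOMMU, publish the domain in iommu->domains[] and mark the
 * IOMMU in the domain's bitmap.  Fails with -ENOMEM once all
 * cap_ndoms(iommu->cap) ids are in use.
 */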
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

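/*
 * Carve ranges that must never be handed out as DMA addresses out of the
 * global reserved_iova_list: the IOAPIC MMIO window (interrupt messages
 * must not be remapped) and every PCI MMIO BAR, so that a dma_map_*()
 * result can never alias a peer device's registers.  Every new domain
 * then copies this list via domain_reserve_special_ranges().
 */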
static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

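/*
 * Round a guest address width up to the next width the page-table format
 * can actually express: 12 bits of page offset plus a whole number of
 * 9-bit levels.  For example (values worked by hand, not taken from the
 * spec): gaw = 48 gives r = (48 - 12) % 9 = 0, so agaw = 48; gaw = 40
 * gives r = (40 - 12) % 9 = 1, so agaw = 40 + 9 - 1 = 48.  The result is
 * capped at 64.
 */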
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

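/*
 * Initialise a freshly attached domain: set up its IOVA allocator, copy
 * the global reserved ranges, pick an adjusted guest address width
 * (AGAW) that both the caller's guest_width and the hardware's
 * cap_sagaw() support, latch the IOMMU's coherency/snooping/superpage
 * capabilities, and allocate the top-level page directory.
 */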
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

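/*
 * Program one context-table entry so that requests from PCI device
 * (segment, bus, devfn) are translated through @domain's page tables
 * (or passed through untranslated for CONTEXT_TT_PASS_THROUGH).  For
 * VM and static-identity domains the per-IOMMU domain id is looked up,
 * or allocated on first use, since domain->id is only meaningful on
 * the IOMMU the domain was originally attached to.
 */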
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

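/*
 * Map the context for @pdev itself and then for every bridge between it
 * and the upstream PCIe-to-PCI bridge (if any), because a conventional
 * PCI segment behind such a bridge presents a single source-id: the
 * bridge's secondary bus (or the bridge itself for legacy PCI).
 */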
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

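/*
 * Worked example for aligned_nrpages() below, assuming 4KiB MM pages:
 * host_addr = 0x1234, size = 0x2000 keeps only the in-page offset
 * (0x234), rounds 0x234 + 0x2000 = 0x2234 up to 0x3000, and so returns
 * 3 VT-d pages -- one more than the size alone would suggest, to cover
 * the unaligned head and tail.
 */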
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

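/*
 * Core mapping routine.  Called in two modes: with @sg set it walks a
 * scatterlist and derives the physical address of each chunk; with
 * @sg == NULL it maps @nr_pages contiguous pages starting at @phys_pfn.
 * Superpage PTEs are used whenever alignment and run length allow, and
 * freshly written PTEs are cache-flushed one page of PTEs at a time.
 */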
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need a lock here; nobody else
		 * touches this iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: the domain info for a device is stored in
 * struct pci_dev->dev.archdata.iommu.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

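/*
 * Find or create the domain a device should use.  If the device sits
 * behind a PCIe-to-PCI bridge it must share a domain with everything on
 * that bridge's secondary bus, so the bridge's (bus, devfn) is used as
 * the lookup key and the domain is flagged
 * DOMAIN_FLAG_P2P_MULTIPLE_DEVICES.  Racing callers are resolved under
 * device_domain_lock: whoever loses the race frees its own domain and
 * adopts the winner's.
 */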
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

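/*
 * Build si_domain, the single static-identity (1:1) domain shared by
 * all hardware units: attach it to every active IOMMU, initialise it
 * with the default address width, and -- unless hardware passthrough is
 * in use -- populate it with identity mappings for every usable RAM
 * range reported by memblock.
 */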
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}

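/*
 * Walk every PCI device at boot and, where iommu_should_identity_map()
 * agrees, attach it to si_domain -- using pass-through context entries
 * when hardware supports it (hw != 0), or multi-level translation
 * through the 1:1 page tables otherwise.
 */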
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					  hw ? CONTEXT_TT_PASS_THROUGH :
					       CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * access is read-only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from a sane IOMMU hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass-through is not set or not enabled, set up context entries
	 * for identity mappings for RMRR, graphics and ISA devices, and fall
	 * back to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(drhd->iommu);
			continue;
		}
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

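/*
 * Find (or create) the dmar_domain for a device and make sure a context
 * mapping exists for it, allocating one with the default address width
 * if necessary.  Returns NULL on allocation or mapping failure.
 */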
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
				    DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map/unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(!dev_is_pci(dev)))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64-bit DMA device detached from a VM is put back
		 * into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

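/*
 * Map a single physically contiguous buffer for DMA: allocate an IOVA
 * range sized in VT-d pages, install page-table entries with the
 * read/write permissions implied by the DMA direction, and flush the
 * IOTLB (or write buffer) as required by caching mode.  Returns the
 * bus (DMA) address, or 0 on failure.
 */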
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, so we should map
	 * the whole page.  Note: if two parts of one page are separately
	 * mapped, we might have two guest_addr mappings to the same host
	 * paddr, but this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

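/*
 * Flush all deferred unmap entries: on real hardware do one global
 * IOTLB flush per IOMMU plus per-entry device-IOTLB invalidations; in
 * caching mode use page-selective flushes instead, since global
 * flushes make emulation expensive.  Only then are the IOVAs returned
 * to the allocator.  Caller must hold async_umap_flush_lock.
 */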
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
				iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

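/*
 * Queue an IOVA for deferred freeing on its IOMMU's deferred_flush
 * list.  The actual IOTLB flush and __free_iova() happen later in
 * flush_unmaps(), either when HIGH_WATER_MARK entries have accumulated
 * or when the 10ms unmap_timer fires.
 */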
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

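/*
 * Tear down the mapping for a single DMA handle: clear the PTEs and
 * free the page tables for the IOVA range, then either flush the IOTLB
 * and free the IOVA immediately (strict mode) or defer both via
 * add_unmap() to amortize the flush cost.
 */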
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu time otherwise used up by the iotlb
		 * flush operation.
		 */
	}
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(hwdev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6th of the cpu time otherwise used up by the iotlb
		 * flush operation.
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

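/*
 * Map a scatterlist into one contiguous IOVA range: the range is sized
 * to the page-aligned sum of all segments, mapped with permissions
 * derived from the DMA direction, and flushed according to caching
 * mode.  Returns the number of mapped elements, or 0 on failure.
 */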
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

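/*
 * Mark DMAR units that can be bypassed: units whose device scope
 * contains no PCI devices at all, and graphics-only units when
 * dmar_map_gfx is clear (their devices get the dummy "no translation
 * needed" token instead).
 */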
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for (i = 0; i < drhd->devices_cnt; i++) {
				if (!drhd->devices[i])
					continue;
				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
			}
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

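/*
 * System-suspend callback: flush all caches, disable translation and
 * save the fault-event registers of every active IOMMU so that
 * iommu_resume() can restore them.
 */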
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

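/*
 * Return 1 if the device sits below a PCIe root port listed in an ATSR
 * (or an include-all ATSR exists for its segment), i.e. if Address
 * Translation Services may be used for it; 0 otherwise.
 */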
3576int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3577{
3578 int i;
3579 struct pci_bus *bus;
3580 struct acpi_dmar_atsr *atsr;
3581 struct dmar_atsr_unit *atsru;
3582
3583 dev = pci_physfn(dev);
3584
3585 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3586 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3587 if (atsr->segment == pci_domain_nr(dev->bus))
3588 goto found;
3589 }
3590
3591 return 0;
3592
3593found:
3594 for (bus = dev->bus; bus; bus = bus->parent) {
3595 struct pci_dev *bridge = bus->self;
3596
3597 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003598 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003599 return 0;
3600
Yijing Wang62f87c02012-07-24 17:20:03 +08003601 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003602 for (i = 0; i < atsru->devices_cnt; i++)
3603 if (atsru->devices[i] == bridge)
3604 return 1;
3605 break;
3606 }
3607 }
3608
3609 if (atsru->include_all)
3610 return 1;
3611
3612 return 0;
3613}
3614
int __init dmar_parse_rmrr_atsr_dev(void)
{
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = 0;

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Here we only respond to the action of a device being unbound from
 * its driver.
 *
 * An added device is not attached to its DMAR domain here yet. That
 * will happen when the device is mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;

	if (iommu_no_mapping(dev))
		return 0;

	domain = find_domain(pdev);
	if (!domain)
		return 0;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
		domain_remove_one_dev_info(domain, pdev);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
		    list_empty(&domain->devices))
			domain_exit(domain);
	}

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

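/*
 * Main VT-d initialization entry point: parse the DMAR table, disable
 * any translation left enabled by the firmware, set up domains and
 * RMRR/ISA identity mappings via init_dmars(), and finally install
 * intel_dma_ops as the system's DMA ops.
 */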
int __init intel_iommu_init(void)
{
	int ret = 0;
	struct dmar_drhd_unit *drhd;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu;

		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	if (no_iommu || dmar_disabled)
		return -ENODEV;

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		return -ENODEV;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	intel_iommu_enabled = 1;

	return 0;
}

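/*
 * For a device behind a PCIe-to-PCI bridge, context entries are made
 * for the bridge chain as well; tear those down, walking from the
 * device's parent bus up to (and including) the upstream bridge.
 */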
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->segment == pci_domain_nr(pdev->bus) &&
		    info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					    info->devfn))
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static atomic_t vm_domid = ATOMIC_INIT(0);

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = atomic_inc_return(&vm_domid);
	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

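/*
 * Initialize a virtual-machine domain that is not tied to a specific
 * IOMMU: set up its IOVA allocator, compute the adjusted guest address
 * width and allocate the top-level page directory.
 */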
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;
	domain->nid = -1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(i, iommu->domain_ids, ndomains) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

Joerg Roedel5d450802008-12-03 14:52:32 +01003979static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003980{
Joerg Roedel5d450802008-12-03 14:52:32 +01003981 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003982
Joerg Roedel5d450802008-12-03 14:52:32 +01003983 dmar_domain = iommu_alloc_vm_domain();
3984 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003985 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003986 "intel_iommu_domain_init: dmar_domain == NULL\n");
3987 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003988 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003989 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003990 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003991 "intel_iommu_domain_init() failed\n");
3992 vm_domain_exit(dmar_domain);
3993 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003994 }
Allen Kay8140a952011-10-14 12:32:17 -07003995 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003996 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003997
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003998 domain->geometry.aperture_start = 0;
3999 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4000 domain->geometry.force_aperture = true;
4001
Joerg Roedel5d450802008-12-03 14:52:32 +01004002 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004003}
Kay, Allen M38717942008-09-09 18:37:29 +03004004
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

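/*
 * Attach @dev to @domain.  Any mapping the device already has is torn
 * down first; the domain's address width is then clamped to what the
 * device's IOMMU can address, shedding page-table levels if needed.
 */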
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

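/* Detach @dev from @domain, undoing intel_iommu_attach_device(). */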
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

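/*
 * Map @size bytes of physical memory at @hpa into @domain at @iova,
 * with permissions derived from @iommu_prot.  Fails if the range would
 * exceed the address width negotiated at attach time.
 */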
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

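/*
 * Unmap the range starting at @iova and return the number of bytes
 * actually unmapped: PAGE_SIZE << order of the cleared PTE range.
 */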
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

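/*
 * Walk the domain's page table and return the physical address @iova
 * translates to, or 0 if there is no mapping.
 */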
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

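/*
 * Report domain capabilities: snooped (cache-coherent) mappings, and
 * whether interrupt remapping is enabled system-wide.
 */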
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

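/*
 * Work out which IOMMU group @dev belongs to and add it.  Devices that
 * cannot be isolated from each other (legacy-bridge DMA aliases,
 * quirked DMA sources, and functions or buses lacking the ACS flags
 * above) are folded into the group of the device they alias or sit
 * behind.
 */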
static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add it to the same group as the lowest
	 * numbered function that also does not support the required
	 * ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

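/* Quirks for chipsets whose VT-d support is broken or misadvertised. */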
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

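/*
 * GGC is the graphics control register in the host bridge's PCI config
 * space; the GGC_MEMORY_VT_ENABLED bit below is what
 * quirk_calpella_no_shadow_gtt() checks.
 */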
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}