/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

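/*
 * ~0xFFFUL has every bit from bit 12 upwards set, so the bitmap
 * advertises every power-of-two size >= 4KiB as supported; the IOMMU
 * core then only has to split regions by order and natural alignment.
 */
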
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

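/*
 * Worked example of the helpers above, assuming VTD_PAGE_SHIFT == 12:
 * agaw 2 selects a 4-level table (agaw_to_level(2) == 4) covering
 * 30 + 2 * 9 == 48 bits of address, i.e. DEFAULT_DOMAIN_ADDRESS_WIDTH.
 * Each level strides 9 bits of pfn, so a level-1 entry maps one 4KiB
 * page, level 2 maps 2MiB (lvl_to_nr_pages(2) == 512) and level 3 maps
 * 1GiB superpages.
 */
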
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

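/*
 * On x86 with 4KiB MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * two conversions above shift by zero and the pfns are identical; the
 * helpers matter on configurations where MM pages are larger than VT-d
 * pages.
 */
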
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d can't be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

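/*
 * A page-table page holds 512 eight-byte ptes, so a pte pointer that is
 * 4KiB-aligned sits at slot 0 of its table; first_pte_in_page() is how
 * the range-walking loops below notice they have stepped into the next
 * table page.
 */
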
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine: more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

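/*
 * Example: the options above combine comma-separated on the kernel
 * command line, e.g. "intel_iommu=on,strict,sp_off" enables the IOMMU,
 * forces synchronous IOTLB flushing on unmap, and disables superpages.
 */
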
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

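/*
 * Example: hardware that implements only 4-level tables sets bit 2 in
 * SAGAW (48-bit AGAW).  width_to_agaw(48) == 2 and that bit is set, so
 * both functions above return agaw 2, giving agaw_to_level(2) == 4
 * page-table levels for the domain.
 */
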
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

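/*
 * cap_super_page_val() is a bitmap of hardware superpage support (per
 * the VT-d spec: bit 0 == 2MiB, bit 1 == 1GiB).  ANDing it across every
 * active iommu and taking fls() picks the largest superpage level all
 * of them can handle; one unit without superpage support zeroes the
 * mask and disables superpages for the domain.
 */
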
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_active_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->busn_res.end >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

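/*
 * Context tables are allocated lazily: one 4KiB page per bus, created
 * the first time any device on that bus needs an entry.  Both the new
 * table and the root entry that now points at it are flushed with
 * __iommu_flush_cache() in case the iommu is not cache-coherent.
 */
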
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

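/*
 * pfn_to_dma_pte() walks from the top level down, allocating missing
 * intermediate table pages as it goes.  The cmpxchg64() makes that
 * allocation safe against concurrent walkers without holding a lock:
 * whoever loses the race frees its page and uses the winner's table.
 * target_level == 0 means "find the leaf, whatever its level", which
 * is how superpage entries are located.
 */
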
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}

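/*
 * The inner loop above clears runs of ptes inside one table page (the
 * first_pte_in_page() test stops at the page boundary), so each run
 * needs only a single domain_flush_cache() call.  The return value
 * turns the last superpage level seen into a page order, telling the
 * caller the granularity that was actually cleared.
 */
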
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

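/*
 * dma_pte_free_level() recurses depth-first and only frees a table page
 * when the requested range covers every pfn that page can map, so a
 * partial unmap never tears down tables still used by neighbouring
 * mappings.
 */
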
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

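/*
 * The register-programming helpers here all follow one pattern: take
 * register_lock, write the command to the hardware, then poll via
 * IOMMU_WAIT_OP() until the matching status bit (e.g. DMA_GSTS_RTPS
 * above) shows the hardware has acted on it.
 */
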
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

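/*
 * For page-selective invalidation (DMA_TLB_PSI_FLUSH), val_iva packs
 * the address-mask order into the low bits of the invalidate address:
 * e.g. size_order == 2 invalidates a naturally aligned run of four
 * 4KiB pages starting at addr.
 */
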
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

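/*
 * The helpers above cover ATS (Address Translation Services): a device
 * with ATS enabled caches translations in its own device-IOTLB, so on
 * every IOTLB flush each such device is told, via qi_flush_dev_iotlb()
 * with its source-id and queue depth, to drop the same range.
 */
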
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
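
/*
 * Example (illustrative only, assuming 4KiB pages): IOVA_PFN() is a
 * right shift by the page size, so the IOAPIC reservation above covers
 * page frames 0xfee00000 >> 12 = 0xfee00 through 0xfeefffff >> 12 =
 * 0xfeeff. Any domain that copies these reserved ranges will never
 * hand that window out from alloc_iova().
 */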

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
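
/*
 * Worked example (illustrative only): VT-d page-table levels cover the
 * address space in 9-bit strides above the 12-bit page offset, so valid
 * adjusted widths are 12 + 9*n (30, 39, 48, 57, ...). For gaw = 40:
 *
 *	r    = (40 - 12) % 9 = 1
 *	agaw = 40 + 9 - 1    = 48
 *
 * i.e. a 40-bit guest width is rounded up to a 4-level, 48-bit table.
 */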

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
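
/*
 * Example (illustrative only): width_to_agaw(48) == 2, so if the
 * hardware's cap_sagaw() is 0x4 (only bit 2 set, i.e. only 4-level
 * tables), test_bit(2, &sagaw) succeeds and domain->agaw stays 2.
 * A request that only needs agaw 1 (39-bit) on that hardware is bumped
 * up by find_next_bit() to the supported 4-level table instead.
 */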

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of the page table for IOMMUs whose
		 * agaw is smaller than the domain's default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}
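
/*
 * Example topology (illustrative only): for an endpoint at 03:00.0
 * behind a PCI-PCI bridge at 02:01.0, which in turn sits behind a
 * PCIe-to-PCI bridge at 01:00.0, the code above programs context
 * entries for 03:00.0 itself, for the intermediate bridge 02:01.0,
 * and finally for (bus 02, devfn 0), because transactions forwarded
 * by a conventional PCI bridge carry the bridge's source-id rather
 * than the device's own.
 */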

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
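
/*
 * Worked example (illustrative only, 4KiB pages): a 0x2000-byte buffer
 * starting at offset 0x800 within its first page spans
 * PAGE_ALIGN(0x800 + 0x2000) >> 12 = 0x3000 >> 12 = 3 VT-d pages,
 * even though 0x2000 bytes alone would fit in two.
 */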

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
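
/*
 * Worked example (illustrative only): with superpage support for one
 * extra level (support == 1), iov_pfn = 0x200, phy_pfn = 0x1400 and
 * pages = 0x400: pfnmerge = 0x1600 has its low nine bits clear, so the
 * first pass succeeds and level becomes 2, i.e. this mapping may start
 * with a 2MiB superpage (512 x 4KiB).
 */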

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are
				   removed to make room for the superpage,
				   if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need a lock here; nobody else
		 * touches this iova range.
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
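
/*
 * Usage sketch (illustrative only; 'phys' is a hypothetical physical
 * address): identity-mapping a single 4KiB page would look like
 *
 *	domain_pfn_mapping(domain, phys >> VTD_PAGE_SHIFT,
 *			   phys >> VTD_PAGE_SHIFT, 1,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is the same pattern iommu_domain_identity_map() uses below for
 * whole RMRR regions.
 */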

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->dev.archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* the upstream bridge already has a domain; use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under a p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* the pcie-to-pci bridge already has a domain; use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck here; another thread may have set it in the meantime */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * An RMRR range might overlap a physical memory range that is
	 * already mapped, so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
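
/*
 * Example (illustrative only): for the ISA workaround below, start = 0
 * and end = 16MiB - 1 give first_vpfn = 0 and last_vpfn = 0xfff, so
 * 0x1000 page frames are reserved in the domain's iovad and mapped 1:1.
 */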

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up in
	   si_domain to start with. */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process, since their RMRRs are known not to be
	 * needed after the BIOS hands off to the OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
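
/*
 * Example (illustrative only): on a machine with 8GiB of RAM,
 * dma_get_required_mask() is roughly DMA_BIT_MASK(33), so a device
 * whose dma_mask is DMA_BIT_MASK(32) fails the check above at run time
 * and is taken out of the identity domain, since a 1:1 mapping of high
 * memory would be unreachable for it.
 */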

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			ret = domain_add_dev_info(si_domain, pdev,
					hw ? CONTEXT_TT_PASS_THROUGH :
					     CONTEXT_TT_MULTI_LEVEL);
			if (ret) {
				/* device not associated with an iommu */
				if (ret == -ENODEV)
					continue;
				return ret;
			}
			pr_info("IOMMU: %s identity mapping for device %s\n",
				hw ? "hardware" : "software", pci_name(pdev));
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *	allocate root
	 *	initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass-through is not set or not enabled, set up context entries
	 * for identity mappings for RMRR, GFX and ISA, and possibly fall
	 * back to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *	for each dev attached to rmrr
	 *	do
	 *		locate drhd for dev, alloc domain for dev
	 *		allocate free domain
	 *		allocate page table entries for rmrr
	 *		if context not allocated for bus
	 *			allocate and init context
	 *			set present in root table for this bus
	 *		init context with domain, translation etc
	 *	endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *	enable fault log
	 *	global invalidate context cache
	 *	global invalidate iotlb
	 *	enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
	kfree(g_iommus);
	return ret;
}
2631
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002632/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002633static struct iova *intel_alloc_iova(struct device *dev,
2634 struct dmar_domain *domain,
2635 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002636{
2637 struct pci_dev *pdev = to_pci_dev(dev);
2638 struct iova *iova = NULL;
2639
David Woodhouse875764d2009-06-28 21:20:51 +01002640 /* Restrict dma_mask to the width that the iommu can handle */
2641 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2642
2643 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002644 /*
2645 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002646 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002647 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002648 */
David Woodhouse875764d2009-06-28 21:20:51 +01002649 iova = alloc_iova(&domain->iovad, nrpages,
2650 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2651 if (iova)
2652 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002653 }
David Woodhouse875764d2009-06-28 21:20:51 +01002654 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2655 if (unlikely(!iova)) {
2656 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2657 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002658 return NULL;
2659 }
2660
2661 return iova;
2662}
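/*
 * Note on the policy above: unless dmar_forcedac is set, a device whose
 * DMA mask exceeds 32 bits is still given IOVAs from below 4GiB first,
 * and only falls back to the full mask once that range is exhausted;
 * this keeps the cheaper single-address-cycle range preferred, at the
 * cost of crowding the low IOVA space.
 */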
2663
David Woodhouse147202a2009-07-07 19:43:20 +01002664static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002665{
2666 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002667 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002668
2669 domain = get_domain_for_dev(pdev,
2670 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2671 if (!domain) {
2672 printk(KERN_ERR
2673 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002674 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002675 }
2676
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002677 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002678 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002679 ret = domain_context_mapping(domain, pdev,
2680 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002681 if (ret) {
2682 printk(KERN_ERR
2683 "Domain context map for %s failed",
2684 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002685 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002686 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002687 }
2688
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002689 return domain;
2690}
2691
David Woodhouse147202a2009-07-07 19:43:20 +01002692static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2693{
2694 struct device_domain_info *info;
2695
2696 /* No lock here, assumes no domain exit in normal case */
2697 info = dev->dev.archdata.iommu;
2698 if (likely(info))
2699 return info->domain;
2700
2701 return __get_valid_domain_for_dev(dev);
2702}
2703
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002704static int iommu_dummy(struct pci_dev *pdev)
2705{
2706 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2707}
2708
2709/* Check if the pdev needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002710static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002711{
David Woodhouse73676832009-07-04 14:08:36 +01002712 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002713 int found;
2714
Yijing Wangdbad0862013-12-05 19:43:42 +08002715 if (unlikely(!dev_is_pci(dev)))
David Woodhouse73676832009-07-04 14:08:36 +01002716 return 1;
2717
2718 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002719 if (iommu_dummy(pdev))
2720 return 1;
2721
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002722 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002723 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002724
2725 found = identity_mapping(pdev);
2726 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002727 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002728 return 1;
2729 else {
2730 /*
2731			 * A 32-bit DMA device is removed from si_domain and falls
2732			 * back to non-identity mapping.
2733 */
2734 domain_remove_one_dev_info(si_domain, pdev);
2735 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2736 pci_name(pdev));
2737 return 0;
2738 }
2739 } else {
2740 /*
2741		 * If a 64-bit DMA device is detached from a VM, the device
2742		 * is put back into si_domain for identity mapping.
2743 */
David Woodhouse6941af22009-07-04 18:24:27 +01002744 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002745 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002746 ret = domain_add_dev_info(si_domain, pdev,
2747 hw_pass_through ?
2748 CONTEXT_TT_PASS_THROUGH :
2749 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002750 if (!ret) {
2751 printk(KERN_INFO "64bit %s uses identity mapping\n",
2752 pci_name(pdev));
2753 return 1;
2754 }
2755 }
2756 }
2757
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002758 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759}
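/*
 * Summary of iommu_no_mapping(): non-PCI and force-bypassed (dummy)
 * devices always skip translation; a device currently in si_domain keeps
 * its identity mapping only while iommu_should_identity_map() still
 * holds, and is otherwise demoted to a private domain, while an eligible
 * device outside si_domain is pulled back in, with pass-through context
 * entries when the hardware offers them.
 */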
2760
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002761static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2762 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002763{
2764 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002765 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002766 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002767 struct iova *iova;
2768 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002769 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002770 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002771 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002772
2773 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002774
David Woodhouse73676832009-07-04 14:08:36 +01002775 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002776 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002777
2778 domain = get_valid_domain_for_dev(pdev);
2779 if (!domain)
2780 return 0;
2781
Weidong Han8c11e792008-12-08 15:29:22 +08002782 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002783 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002784
Mike Travisc681d0b2011-05-28 13:15:05 -05002785 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002786 if (!iova)
2787 goto error;
2788
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002789 /*
2790 * Check if DMAR supports zero-length reads on write only
2791 * mappings..
2792 */
2793 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002794 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002795 prot |= DMA_PTE_READ;
2796 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2797 prot |= DMA_PTE_WRITE;
2798 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002799	 * paddr to (paddr + size) might span a partial page; we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002800	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002801	 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002802	 * is not a big problem
2803 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002804 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002805 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002806 if (ret)
2807 goto error;
2808
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002809 /* it's a non-present to present mapping. Only flush if caching mode */
2810 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002811 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002812 else
Weidong Han8c11e792008-12-08 15:29:22 +08002813 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002814
David Woodhouse03d6a242009-06-28 15:33:46 +01002815 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2816 start_paddr += paddr & ~PAGE_MASK;
2817 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002818
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002819error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002820 if (iova)
2821 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002822 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002823 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002824 return 0;
2825}
2826
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002827static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2828 unsigned long offset, size_t size,
2829 enum dma_data_direction dir,
2830 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002831{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002832 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2833 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002834}
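/*
 * Illustrative sketch, not part of this file: drivers do not call
 * intel_map_page() directly.  A streaming mapping made through the
 * generic DMA API, e.g.
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *
 * dispatches through intel_dma_ops into __intel_map_single(), which
 * hands back a bus address inside the IOVA allocated for the device's
 * domain ("buf", "len" and "pdev" are hypothetical driver state).
 */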
2835
mark gross5e0d2a62008-03-04 15:22:08 -08002836static void flush_unmaps(void)
2837{
mark gross80b20dd2008-04-18 13:53:58 -07002838 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002839
mark gross5e0d2a62008-03-04 15:22:08 -08002840 timer_on = 0;
2841
2842 /* just flush them all */
2843 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002844 struct intel_iommu *iommu = g_iommus[i];
2845 if (!iommu)
2846 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002847
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002848 if (!deferred_flush[i].next)
2849 continue;
2850
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002851		/* In caching mode, global flushes make emulation expensive */
2852 if (!cap_caching_mode(iommu->cap))
2853 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002854 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002855 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002856 unsigned long mask;
2857 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002858 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002859
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002860 /* On real hardware multiple invalidations are expensive */
2861 if (cap_caching_mode(iommu->cap))
2862 iommu_flush_iotlb_psi(iommu, domain->id,
2863 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2864 else {
2865 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2866 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2867 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2868 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002869 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002870 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002871 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002872 }
2873
mark gross5e0d2a62008-03-04 15:22:08 -08002874 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002875}
2876
2877static void flush_unmaps_timeout(unsigned long data)
2878{
mark gross80b20dd2008-04-18 13:53:58 -07002879 unsigned long flags;
2880
2881 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002882 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002883 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002884}
2885
2886static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2887{
2888 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002889 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002890 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002891
2892 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002893 if (list_size == HIGH_WATER_MARK)
2894 flush_unmaps();
2895
Weidong Han8c11e792008-12-08 15:29:22 +08002896 iommu = domain_get_iommu(dom);
2897 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002898
mark gross80b20dd2008-04-18 13:53:58 -07002899 next = deferred_flush[iommu_id].next;
2900 deferred_flush[iommu_id].domain[next] = dom;
2901 deferred_flush[iommu_id].iova[next] = iova;
2902 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002903
2904 if (!timer_on) {
2905 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2906 timer_on = 1;
2907 }
2908 list_size++;
2909 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2910}
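/*
 * The machinery above implements lazy IOTLB flushing: freed IOVAs are
 * parked per-iommu in deferred_flush[], and flush_unmaps() runs either
 * once HIGH_WATER_MARK entries have accumulated or when the 10ms
 * unmap_timer fires, whichever comes first.  Until then a stale mapping
 * may still be visible to the device, which is the usual trade-off for
 * batching invalidations.
 */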
2911
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002912static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2913 size_t size, enum dma_data_direction dir,
2914 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002915{
2916 struct pci_dev *pdev = to_pci_dev(dev);
2917 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002918 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002919 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002920 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002921
David Woodhouse73676832009-07-04 14:08:36 +01002922 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002924
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002925 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002926 BUG_ON(!domain);
2927
Weidong Han8c11e792008-12-08 15:29:22 +08002928 iommu = domain_get_iommu(domain);
2929
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002930 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002931 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2932 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002933 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002934
David Woodhoused794dc92009-06-28 00:27:49 +01002935 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2936 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002937
David Woodhoused794dc92009-06-28 00:27:49 +01002938 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2939 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002940
2941 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002942 dma_pte_clear_range(domain, start_pfn, last_pfn);
2943
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002944 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002945 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2946
mark gross5e0d2a62008-03-04 15:22:08 -08002947 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002948 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002949 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002950 /* free iova */
2951 __free_iova(&domain->iovad, iova);
2952 } else {
2953 add_unmap(domain, iova);
2954 /*
2955		 * queue up the release of the unmap to save the 1/6th of the
2956		 * CPU time used up by the iotlb flush operation...
2957 */
mark gross5e0d2a62008-03-04 15:22:08 -08002958 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959}
2960
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002961static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002962 dma_addr_t *dma_handle, gfp_t flags,
2963 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964{
2965 void *vaddr;
2966 int order;
2967
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002968 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002969 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002970
2971 if (!iommu_no_mapping(hwdev))
2972 flags &= ~(GFP_DMA | GFP_DMA32);
2973 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2974 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2975 flags |= GFP_DMA;
2976 else
2977 flags |= GFP_DMA32;
2978 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002979
2980 vaddr = (void *)__get_free_pages(flags, order);
2981 if (!vaddr)
2982 return NULL;
2983 memset(vaddr, 0, size);
2984
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002985 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2986 DMA_BIDIRECTIONAL,
2987 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002988 if (*dma_handle)
2989 return vaddr;
2990 free_pages((unsigned long)vaddr, order);
2991 return NULL;
2992}
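/*
 * Illustrative sketch, not part of this file: coherent buffers arrive
 * here through the generic API, e.g.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, size, &bus, GFP_KERNEL);
 *
 * The pages come straight from the page allocator and are then mapped
 * DMA_BIDIRECTIONAL through __intel_map_single() against the device's
 * coherent_dma_mask ("pdev" and "size" are hypothetical driver state).
 */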
2993
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002994static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002995 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002996{
2997 int order;
2998
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002999 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003000 order = get_order(size);
3001
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003002 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003003 free_pages((unsigned long)vaddr, order);
3004}
3005
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003006static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3007 int nelems, enum dma_data_direction dir,
3008 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003009{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003010 struct pci_dev *pdev = to_pci_dev(hwdev);
3011 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003012 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003013 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003014 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003015
David Woodhouse73676832009-07-04 14:08:36 +01003016 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003017 return;
3018
3019 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003020 BUG_ON(!domain);
3021
3022 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003024 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003025 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3026 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003027 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003028
David Woodhoused794dc92009-06-28 00:27:49 +01003029 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3030 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003031
3032 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003033 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003034
David Woodhoused794dc92009-06-28 00:27:49 +01003035 /* free page tables */
3036 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3037
David Woodhouseacea0012009-07-14 01:55:11 +01003038 if (intel_iommu_strict) {
3039 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003040 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003041 /* free iova */
3042 __free_iova(&domain->iovad, iova);
3043 } else {
3044 add_unmap(domain, iova);
3045 /*
3046		 * queue up the release of the unmap to save the 1/6th of the
3047		 * CPU time used up by the iotlb flush operation...
3048 */
3049 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050}
3051
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003052static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003053 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054{
3055 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003056 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003057
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003058 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003059 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003060 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003061 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003062 }
3063 return nelems;
3064}
3065
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003066static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3067 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003068{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003070 struct pci_dev *pdev = to_pci_dev(hwdev);
3071 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003072 size_t size = 0;
3073 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003074 struct iova *iova = NULL;
3075 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003076 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003077 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003078 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003079
3080 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003081 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003082 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003083
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003084 domain = get_valid_domain_for_dev(pdev);
3085 if (!domain)
3086 return 0;
3087
Weidong Han8c11e792008-12-08 15:29:22 +08003088 iommu = domain_get_iommu(domain);
3089
David Woodhouseb536d242009-06-28 14:49:31 +01003090 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003091 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003093 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3094 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003095 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003096 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003097 return 0;
3098 }
3099
3100 /*
3101 * Check if DMAR supports zero-length reads on write only
3102 * mappings..
3103 */
3104 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003105 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003106 prot |= DMA_PTE_READ;
3107 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3108 prot |= DMA_PTE_WRITE;
3109
David Woodhouseb536d242009-06-28 14:49:31 +01003110 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003111
Fenghua Yuf5329592009-08-04 15:09:37 -07003112 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003113 if (unlikely(ret)) {
3114 /* clear the page */
3115 dma_pte_clear_range(domain, start_vpfn,
3116 start_vpfn + size - 1);
3117 /* free page tables */
3118 dma_pte_free_pagetable(domain, start_vpfn,
3119 start_vpfn + size - 1);
3120 /* free iova */
3121 __free_iova(&domain->iovad, iova);
3122 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003123 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003124
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003125 /* it's a non-present to present mapping. Only flush if caching mode */
3126 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003127 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003128 else
Weidong Han8c11e792008-12-08 15:29:22 +08003129 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003130
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003131 return nelems;
3132}
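/*
 * Note that the whole scatterlist is mapped into one contiguous IOVA
 * range: the aligned_nrpages() totals are summed, a single IOVA of that
 * size is allocated, and domain_sg_mapping() lays the elements out back
 * to back inside it; on failure the partial mapping is torn down and 0
 * is returned, as the map_sg contract requires.
 */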
3133
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003134static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3135{
3136 return !dma_addr;
3137}
3138
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003139struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003140 .alloc = intel_alloc_coherent,
3141 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003142 .map_sg = intel_map_sg,
3143 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003144 .map_page = intel_map_page,
3145 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003146 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003147};
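/*
 * intel_dma_ops becomes the system-wide dma_ops in intel_iommu_init()
 * below, so ordinary dma_map_*()/dma_unmap_*() calls from drivers are
 * routed through the functions above once the IOMMU is up.
 */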
3148
3149static inline int iommu_domain_cache_init(void)
3150{
3151 int ret = 0;
3152
3153 iommu_domain_cache = kmem_cache_create("iommu_domain",
3154 sizeof(struct dmar_domain),
3155 0,
3156 SLAB_HWCACHE_ALIGN,
3158						 NULL);
3159 if (!iommu_domain_cache) {
3160 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3161 ret = -ENOMEM;
3162 }
3163
3164 return ret;
3165}
3166
3167static inline int iommu_devinfo_cache_init(void)
3168{
3169 int ret = 0;
3170
3171 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3172 sizeof(struct device_domain_info),
3173 0,
3174 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003175 NULL);
3176 if (!iommu_devinfo_cache) {
3177 printk(KERN_ERR "Couldn't create devinfo cache\n");
3178 ret = -ENOMEM;
3179 }
3180
3181 return ret;
3182}
3183
3184static inline int iommu_iova_cache_init(void)
3185{
3186 int ret = 0;
3187
3188 iommu_iova_cache = kmem_cache_create("iommu_iova",
3189 sizeof(struct iova),
3190 0,
3191 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003192 NULL);
3193 if (!iommu_iova_cache) {
3194 printk(KERN_ERR "Couldn't create iova cache\n");
3195 ret = -ENOMEM;
3196 }
3197
3198 return ret;
3199}
3200
3201static int __init iommu_init_mempool(void)
3202{
3203 int ret;
3204 ret = iommu_iova_cache_init();
3205 if (ret)
3206 return ret;
3207
3208 ret = iommu_domain_cache_init();
3209 if (ret)
3210 goto domain_error;
3211
3212 ret = iommu_devinfo_cache_init();
3213 if (!ret)
3214 return ret;
3215
3216 kmem_cache_destroy(iommu_domain_cache);
3217domain_error:
3218 kmem_cache_destroy(iommu_iova_cache);
3219
3220 return -ENOMEM;
3221}
3222
3223static void __init iommu_exit_mempool(void)
3224{
3225 kmem_cache_destroy(iommu_devinfo_cache);
3226 kmem_cache_destroy(iommu_domain_cache);
3227 kmem_cache_destroy(iommu_iova_cache);
3228
3229}
3230
Dan Williams556ab452010-07-23 15:47:56 -07003231static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3232{
3233 struct dmar_drhd_unit *drhd;
3234 u32 vtbar;
3235 int rc;
3236
3237 /* We know that this device on this chipset has its own IOMMU.
3238 * If we find it under a different IOMMU, then the BIOS is lying
3239 * to us. Hope that the IOMMU for this device is actually
3240 * disabled, and it needs no translation...
3241 */
3242 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3243 if (rc) {
3244 /* "can't" happen */
3245 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3246 return;
3247 }
3248 vtbar &= 0xffff0000;
3249
3250	/* we know that this iommu should be at offset 0xa000 from vtbar */
3251 drhd = dmar_find_matched_drhd_unit(pdev);
3252 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3253 TAINT_FIRMWARE_WORKAROUND,
3254 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3255 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3256}
3257DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3258
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003259static void __init init_no_remapping_devices(void)
3260{
3261 struct dmar_drhd_unit *drhd;
3262
3263 for_each_drhd_unit(drhd) {
3264 if (!drhd->include_all) {
3265 int i;
3266 for (i = 0; i < drhd->devices_cnt; i++)
3267 if (drhd->devices[i] != NULL)
3268 break;
3269 /* ignore DMAR unit if no pci devices exist */
3270 if (i == drhd->devices_cnt)
3271 drhd->ignored = 1;
3272 }
3273 }
3274
Jiang Liu7c919772014-01-06 14:18:18 +08003275 for_each_active_drhd_unit(drhd) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003276 int i;
Jiang Liu7c919772014-01-06 14:18:18 +08003277 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003278 continue;
3279
3280 for (i = 0; i < drhd->devices_cnt; i++)
3281 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003282 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003283 break;
3284
3285 if (i < drhd->devices_cnt)
3286 continue;
3287
David Woodhousec0771df2011-10-14 20:59:46 +01003288 /* This IOMMU has *only* gfx devices. Either bypass it or
3289 set the gfx_mapped flag, as appropriate */
3290 if (dmar_map_gfx) {
3291 intel_iommu_gfx_mapped = 1;
3292 } else {
3293 drhd->ignored = 1;
3294 for (i = 0; i < drhd->devices_cnt; i++) {
3295 if (!drhd->devices[i])
3296 continue;
3297 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3298 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003299 }
3300 }
3301}
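/*
 * After the two passes above, a DRHD unit with no devices at all is
 * ignored outright, and a unit serving only graphics devices is either
 * kept (recording intel_iommu_gfx_mapped) or, when gfx mapping is
 * disabled, bypassed with its devices marked as dummy so the DMA path
 * skips them.
 */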
3302
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003303#ifdef CONFIG_SUSPEND
3304static int init_iommu_hw(void)
3305{
3306 struct dmar_drhd_unit *drhd;
3307 struct intel_iommu *iommu = NULL;
3308
3309 for_each_active_iommu(iommu, drhd)
3310 if (iommu->qi)
3311 dmar_reenable_qi(iommu);
3312
Joseph Cihulab7792602011-05-03 00:08:37 -07003313 for_each_iommu(iommu, drhd) {
3314 if (drhd->ignored) {
3315 /*
3316 * we always have to disable PMRs or DMA may fail on
3317 * this device
3318 */
3319 if (force_on)
3320 iommu_disable_protect_mem_regions(iommu);
3321 continue;
3322 }
3323
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003324 iommu_flush_write_buffer(iommu);
3325
3326 iommu_set_root_entry(iommu);
3327
3328 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003329 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003330 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003331 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003332 if (iommu_enable_translation(iommu))
3333 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003334 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003335 }
3336
3337 return 0;
3338}
3339
3340static void iommu_flush_all(void)
3341{
3342 struct dmar_drhd_unit *drhd;
3343 struct intel_iommu *iommu;
3344
3345 for_each_active_iommu(iommu, drhd) {
3346 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003347 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003348 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003349 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003350 }
3351}
3352
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003353static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003354{
3355 struct dmar_drhd_unit *drhd;
3356 struct intel_iommu *iommu = NULL;
3357 unsigned long flag;
3358
3359 for_each_active_iommu(iommu, drhd) {
3360 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3361 GFP_ATOMIC);
3362 if (!iommu->iommu_state)
3363 goto nomem;
3364 }
3365
3366 iommu_flush_all();
3367
3368 for_each_active_iommu(iommu, drhd) {
3369 iommu_disable_translation(iommu);
3370
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003371 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003372
3373 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3374 readl(iommu->reg + DMAR_FECTL_REG);
3375 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3376 readl(iommu->reg + DMAR_FEDATA_REG);
3377 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3378 readl(iommu->reg + DMAR_FEADDR_REG);
3379 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3380 readl(iommu->reg + DMAR_FEUADDR_REG);
3381
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003382 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003383 }
3384 return 0;
3385
3386nomem:
3387 for_each_active_iommu(iommu, drhd)
3388 kfree(iommu->iommu_state);
3389
3390 return -ENOMEM;
3391}
3392
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003393static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003394{
3395 struct dmar_drhd_unit *drhd;
3396 struct intel_iommu *iommu = NULL;
3397 unsigned long flag;
3398
3399 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003400 if (force_on)
3401 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3402 else
3403 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003404 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003405 }
3406
3407 for_each_active_iommu(iommu, drhd) {
3408
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003409 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003410
3411 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3412 iommu->reg + DMAR_FECTL_REG);
3413 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3414 iommu->reg + DMAR_FEDATA_REG);
3415 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3416 iommu->reg + DMAR_FEADDR_REG);
3417 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3418 iommu->reg + DMAR_FEUADDR_REG);
3419
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003420 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003421 }
3422
3423 for_each_active_iommu(iommu, drhd)
3424 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003425}
3426
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003427static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003428 .resume = iommu_resume,
3429 .suspend = iommu_suspend,
3430};
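/*
 * The hardware loses its state across suspend: iommu_suspend() snapshots
 * the fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) per iommu, and
 * iommu_resume() re-runs init_iommu_hw() before writing them back, so
 * translation comes back with the same root table and fault reporting
 * that were active before the sleep.
 */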
3431
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003432static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003433{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003434 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003435}
3436
3437#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003438static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003439#endif /* CONFIG_PM */
3440
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003441LIST_HEAD(dmar_rmrr_units);
3442
3443static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3444{
3445 list_add(&rmrr->list, &dmar_rmrr_units);
3446}
3447
3448
3449int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3450{
3451 struct acpi_dmar_reserved_memory *rmrr;
3452 struct dmar_rmrr_unit *rmrru;
3453
3454 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3455 if (!rmrru)
3456 return -ENOMEM;
3457
3458 rmrru->hdr = header;
3459 rmrr = (struct acpi_dmar_reserved_memory *)header;
3460 rmrru->base_address = rmrr->base_address;
3461 rmrru->end_address = rmrr->end_address;
3462
3463 dmar_register_rmrr_unit(rmrru);
3464 return 0;
3465}
3466
3467static int __init
3468rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3469{
3470 struct acpi_dmar_reserved_memory *rmrr;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003471
3472 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
Jiang Liu9bdc5312014-01-06 14:18:27 +08003473 return dmar_parse_dev_scope((void *)(rmrr + 1),
3474 ((void *)rmrr) + rmrr->header.length,
3475 &rmrru->devices_cnt, &rmrru->devices,
3476 rmrr->segment);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003477}
3478
3479static LIST_HEAD(dmar_atsr_units);
3480
3481int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3482{
3483 struct acpi_dmar_atsr *atsr;
3484 struct dmar_atsr_unit *atsru;
3485
3486 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3487 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3488 if (!atsru)
3489 return -ENOMEM;
3490
3491 atsru->hdr = hdr;
3492 atsru->include_all = atsr->flags & 0x1;
3493
3494 list_add(&atsru->list, &dmar_atsr_units);
3495
3496 return 0;
3497}
3498
3499static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3500{
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003501 struct acpi_dmar_atsr *atsr;
3502
3503 if (atsru->include_all)
3504 return 0;
3505
3506 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
Jiang Liu9bdc5312014-01-06 14:18:27 +08003507 return dmar_parse_dev_scope((void *)(atsr + 1),
3508 (void *)atsr + atsr->header.length,
3509 &atsru->devices_cnt, &atsru->devices,
3510 atsr->segment);
3511}
3512
3513static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3514{
3515 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3516 kfree(atsru);
3517}
3518
3519static void intel_iommu_free_dmars(void)
3520{
3521 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3522 struct dmar_atsr_unit *atsru, *atsr_n;
3523
3524 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3525 list_del(&rmrru->list);
3526 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3527 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003528 }
3529
Jiang Liu9bdc5312014-01-06 14:18:27 +08003530 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3531 list_del(&atsru->list);
3532 intel_iommu_free_atsr(atsru);
3533 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003534}
3535
3536int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3537{
3538 int i;
3539 struct pci_bus *bus;
3540 struct acpi_dmar_atsr *atsr;
3541 struct dmar_atsr_unit *atsru;
3542
3543 dev = pci_physfn(dev);
3544
3545 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3546 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3547 if (atsr->segment == pci_domain_nr(dev->bus))
3548 goto found;
3549 }
3550
3551 return 0;
3552
3553found:
3554 for (bus = dev->bus; bus; bus = bus->parent) {
3555 struct pci_dev *bridge = bus->self;
3556
3557 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003558 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003559 return 0;
3560
Yijing Wang62f87c02012-07-24 17:20:03 +08003561 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003562 for (i = 0; i < atsru->devices_cnt; i++)
3563 if (atsru->devices[i] == bridge)
3564 return 1;
3565 break;
3566 }
3567 }
3568
3569 if (atsru->include_all)
3570 return 1;
3571
3572 return 0;
3573}
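/*
 * This lookup gates ATS: a device may use Address Translation Services
 * only if the root port above it appears in (or is covered by an
 * include_all) ATSR for its PCI segment, hence the walk up the
 * bus->parent chain to find that root port.
 */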
3574
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003575int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003576{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003577 struct dmar_rmrr_unit *rmrr;
3578 struct dmar_atsr_unit *atsr;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003579 int ret = 0;
3580
Jiang Liu9bdc5312014-01-06 14:18:27 +08003581 list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003582 ret = rmrr_parse_dev(rmrr);
3583 if (ret)
3584 return ret;
3585 }
3586
Jiang Liu9bdc5312014-01-06 14:18:27 +08003587 list_for_each_entry(atsr, &dmar_atsr_units, list) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003588 ret = atsr_parse_dev(atsr);
3589 if (ret)
3590 return ret;
3591 }
3592
3593 return ret;
3594}
3595
Fenghua Yu99dcade2009-11-11 07:23:06 -08003596/*
3597 * Here we only respond to the action of a device being unbound from its driver.
3598 *
3599 * An added device is not attached to its DMAR domain here yet; that
3600 * happens when the device is first mapped to an iova.
3601 */
3602static int device_notifier(struct notifier_block *nb,
3603 unsigned long action, void *data)
3604{
3605 struct device *dev = data;
3606 struct pci_dev *pdev = to_pci_dev(dev);
3607 struct dmar_domain *domain;
3608
David Woodhouse44cd6132009-12-02 10:18:30 +00003609 if (iommu_no_mapping(dev))
3610 return 0;
3611
Fenghua Yu99dcade2009-11-11 07:23:06 -08003612 domain = find_domain(pdev);
3613 if (!domain)
3614 return 0;
3615
Alex Williamsona97590e2011-03-04 14:52:16 -07003616 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003617 domain_remove_one_dev_info(domain, pdev);
3618
Alex Williamsona97590e2011-03-04 14:52:16 -07003619 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3620 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3621 list_empty(&domain->devices))
3622 domain_exit(domain);
3623 }
3624
Fenghua Yu99dcade2009-11-11 07:23:06 -08003625 return 0;
3626}
3627
3628static struct notifier_block device_nb = {
3629 .notifier_call = device_notifier,
3630};
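/*
 * The notifier above keeps domains from leaking: on driver unbind (and
 * outside pass-through mode) the device is detached from its
 * dmar_domain, and a now-empty domain that is neither a VM domain nor
 * the static-identity domain is torn down immediately.
 */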
3631
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003632int __init intel_iommu_init(void)
3633{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003634 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003635 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003636 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003637
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003638 /* VT-d is required for a TXT/tboot launch, so enforce that */
3639 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003640
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003641 if (dmar_table_init()) {
3642 if (force_on)
3643 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003644 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003645 }
3646
Takao Indoh3a93c842013-04-23 17:35:03 +09003647 /*
3648 * Disable translation if already enabled prior to OS handover.
3649 */
Jiang Liu7c919772014-01-06 14:18:18 +08003650 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09003651 if (iommu->gcmd & DMA_GCMD_TE)
3652 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09003653
Suresh Siddhac2c72862011-08-23 17:05:19 -07003654 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003655 if (force_on)
3656 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003657 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003658 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003659
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003660 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08003661 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07003662
Joseph Cihula51a63e62011-03-21 11:04:24 -07003663 if (iommu_init_mempool()) {
3664 if (force_on)
3665 panic("tboot: Failed to initialize iommu memory\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003666 goto out_free_dmar;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003667 }
3668
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003669 if (list_empty(&dmar_rmrr_units))
3670 printk(KERN_INFO "DMAR: No RMRR found\n");
3671
3672 if (list_empty(&dmar_atsr_units))
3673 printk(KERN_INFO "DMAR: No ATSR found\n");
3674
Joseph Cihula51a63e62011-03-21 11:04:24 -07003675 if (dmar_init_reserved_ranges()) {
3676 if (force_on)
3677 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003678 goto out_free_mempool;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003679 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003680
3681 init_no_remapping_devices();
3682
Joseph Cihulab7792602011-05-03 00:08:37 -07003683 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003684 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003685 if (force_on)
3686 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003687 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003688 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003689 }
3690 printk(KERN_INFO
3691 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3692
mark gross5e0d2a62008-03-04 15:22:08 -08003693 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003694#ifdef CONFIG_SWIOTLB
3695 swiotlb = 0;
3696#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003697 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003698
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003699 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003700
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003701 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003702
Fenghua Yu99dcade2009-11-11 07:23:06 -08003703 bus_register_notifier(&pci_bus_type, &device_nb);
3704
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003705 intel_iommu_enabled = 1;
3706
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003707 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08003708
3709out_free_reserved_range:
3710 put_iova_domain(&reserved_iova_list);
3711out_free_mempool:
3712 iommu_exit_mempool();
3713out_free_dmar:
3714 intel_iommu_free_dmars();
3715 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003716}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003717
Han, Weidong3199aa62009-02-26 17:31:12 +08003718static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3719 struct pci_dev *pdev)
3720{
3721 struct pci_dev *tmp, *parent;
3722
3723 if (!iommu || !pdev)
3724 return;
3725
3726 /* dependent device detach */
3727 tmp = pci_find_upstream_pcie_bridge(pdev);
3728 /* Secondary interface's bus number and devfn 0 */
3729 if (tmp) {
3730 parent = pdev->bus->self;
3731 while (parent != tmp) {
3732 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003733 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003734 parent = parent->bus->self;
3735 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003736 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003737 iommu_detach_dev(iommu,
3738 tmp->subordinate->number, 0);
3739 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003740 iommu_detach_dev(iommu, tmp->bus->number,
3741 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003742 }
3743}
3744
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003745static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003746 struct pci_dev *pdev)
3747{
Yijing Wangbca2b912013-10-31 17:26:04 +08003748 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08003749 struct intel_iommu *iommu;
3750 unsigned long flags;
3751 int found = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +08003752
David Woodhouse276dbf992009-04-04 01:45:37 +01003753 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3754 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003755 if (!iommu)
3756 return;
3757
3758 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08003759 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
Mike Habeck8519dc42011-05-28 13:15:07 -05003760 if (info->segment == pci_domain_nr(pdev->bus) &&
3761 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003762 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003763 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003764 spin_unlock_irqrestore(&device_domain_lock, flags);
3765
Yu Zhao93a23a72009-05-18 13:51:37 +08003766 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003767 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003768 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003769 free_devinfo_mem(info);
3770
3771 spin_lock_irqsave(&device_domain_lock, flags);
3772
3773 if (found)
3774 break;
3775 else
3776 continue;
3777 }
3778
3779		/* if there are no other devices under the same iommu
3780		 * owned by this domain, clear this iommu in iommu_bmp,
3781		 * and update the iommu count and coherency
3782 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003783 if (iommu == device_to_iommu(info->segment, info->bus,
3784 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003785 found = 1;
3786 }
3787
Roland Dreier3e7abe22011-07-20 06:22:21 -07003788 spin_unlock_irqrestore(&device_domain_lock, flags);
3789
Weidong Hanc7151a82008-12-08 22:51:37 +08003790 if (found == 0) {
3791 unsigned long tmp_flags;
3792 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003793 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003794 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003795 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003796 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003797
Alex Williamson9b4554b2011-05-24 12:19:04 -04003798 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3799 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3800 spin_lock_irqsave(&iommu->lock, tmp_flags);
3801 clear_bit(domain->id, iommu->domain_ids);
3802 iommu->domains[domain->id] = NULL;
3803 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3804 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003805 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003806}
3807
3808static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3809{
3810 struct device_domain_info *info;
3811 struct intel_iommu *iommu;
3812 unsigned long flags1, flags2;
3813
3814 spin_lock_irqsave(&device_domain_lock, flags1);
3815 while (!list_empty(&domain->devices)) {
3816 info = list_entry(domain->devices.next,
3817 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01003818 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003819 spin_unlock_irqrestore(&device_domain_lock, flags1);
3820
Yu Zhao93a23a72009-05-18 13:51:37 +08003821 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003822 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003823 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003824 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003825
3826 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003827 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003828 */
3829 spin_lock_irqsave(&domain->iommu_lock, flags2);
3830 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003831 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003832 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003833 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003834 }
3835 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3836
3837 free_devinfo_mem(info);
3838 spin_lock_irqsave(&device_domain_lock, flags1);
3839 }
3840 spin_unlock_irqrestore(&device_domain_lock, flags1);
3841}
3842
Weidong Han5e98c4b2008-12-08 23:03:27 +08003843/* domain id for virtual machine, it won't be set in context */
Jiang Liu18d99162014-01-06 14:18:10 +08003844static atomic_t vm_domid = ATOMIC_INIT(0);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003845
3846static struct dmar_domain *iommu_alloc_vm_domain(void)
3847{
3848 struct dmar_domain *domain;
3849
3850 domain = alloc_domain_mem();
3851 if (!domain)
3852 return NULL;
3853
Jiang Liu18d99162014-01-06 14:18:10 +08003854 domain->id = atomic_inc_return(&vm_domid);
Suresh Siddha4c923d42009-10-02 11:01:24 -07003855 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003856 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003857 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3858
3859 return domain;
3860}
3861
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003862static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003863{
3864 int adjust_width;
3865
3866 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003867 spin_lock_init(&domain->iommu_lock);
3868
3869 domain_reserve_special_ranges(domain);
3870
3871 /* calculate AGAW */
3872 domain->gaw = guest_width;
3873 adjust_width = guestwidth_to_adjustwidth(guest_width);
3874 domain->agaw = width_to_agaw(adjust_width);
3875
3876 INIT_LIST_HEAD(&domain->devices);
3877
3878 domain->iommu_count = 0;
3879 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003880 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003881 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003882 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003883 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003884
3885 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003886 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003887 if (!domain->pgd)
3888 return -ENOMEM;
3889 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3890 return 0;
3891}
3892
3893static void iommu_free_vm_domain(struct dmar_domain *domain)
3894{
3895 unsigned long flags;
3896 struct dmar_drhd_unit *drhd;
3897 struct intel_iommu *iommu;
3898 unsigned long i;
3899 unsigned long ndomains;
3900
Jiang Liu7c919772014-01-06 14:18:18 +08003901 for_each_active_iommu(iommu, drhd) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003902 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003903 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003904 if (iommu->domains[i] == domain) {
3905 spin_lock_irqsave(&iommu->lock, flags);
3906 clear_bit(i, iommu->domain_ids);
3907 iommu->domains[i] = NULL;
3908 spin_unlock_irqrestore(&iommu->lock, flags);
3909 break;
3910 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003911 }
3912 }
3913}
3914
3915static void vm_domain_exit(struct dmar_domain *domain)
3916{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003917	/* Domain 0 is reserved, so don't process it */
3918 if (!domain)
3919 return;
3920
3921 vm_domain_remove_all_dev_info(domain);
3922 /* destroy iovas */
3923 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003924
3925 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003926 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003927
3928 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003929 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003930
3931 iommu_free_vm_domain(domain);
3932 free_domain_mem(domain);
3933}
3934
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

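/*
 * Attaching a device may shrink the domain's usable address width: if
 * the IOMMU behind the device supports fewer page-table levels than the
 * domain currently uses, the extra top levels are stripped off below.
 */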
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

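/*
 * Map [iova, iova + size) to [hpa, hpa + size) in this domain's page
 * table.  The size is first rounded up to whole VT-d pages: e.g. with
 * 4KiB pages, mapping 0x1000 bytes at hpa 0x1ffff800 straddles a page
 * boundary, so aligned_nrpages() below yields two pages.
 */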
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

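/*
 * The size returned below is PAGE_SIZE << order, i.e. the extent that
 * was actually cleared, so the generic iommu_unmap() loop can account
 * correctly for superpage mappings.
 */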
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

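/*
 * PCIe Access Control Services flags that must be enabled on every hop
 * between a device and the root bus before the device can be given its
 * own IOMMU group: Source Validation, Request Redirect, Completion
 * Redirect and Upstream Forwarding.
 */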
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;

	if (!device_to_iommu(pci_domain_nr(pdev->bus),
			     pdev->bus->number, pdev->devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest-numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

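/*
 * Illustrative sketch, not part of the driver: how a client such as
 * VFIO or KVM exercises the ops above through the generic IOMMU API in
 * include/linux/iommu.h.  The function name and the WARN_ON check are
 * ours; iova and paddr are assumed to be page-aligned.
 */
static int __maybe_unused example_iommu_map_one_page(struct device *dev,
						     unsigned long iova,
						     phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	/* ->domain_init: allocate and initialize a dmar_domain */
	domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain)
		return -ENOMEM;

	/* ->attach_dev: context-map the device into the domain */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* ->map: install a single 4KiB translation */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ->iova_to_phys: walk the page table back to the physical page */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	/* ->unmap: returns the number of bytes actually unmapped */
	iommu_unmap(domain, iova, PAGE_SIZE);

out_detach:
	iommu_detach_device(domain, dev);	/* ->detach_dev */
out_free:
	iommu_domain_free(domain);		/* ->domain_destroy */
	return ret;
}
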
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

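/* GGC appears to be the GMCH Graphics Control register on the
   integrated-graphics host bridge; the VT bits below describe how much
   stolen memory the BIOS set aside for the shadow GTT. */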
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}