Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040027#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070028#include <linux/slab.h>
29#include <linux/irq.h>
30#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070031#include <linux/spinlock.h>
32#include <linux/pci.h>
33#include <linux/dmar.h>
34#include <linux/dma-mapping.h>
35#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080036#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030037#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010038#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030039#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010040#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070041#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100042#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020043#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080044#include <linux/memblock.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070045#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090047#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070048
Joerg Roedel078e1ee2012-09-26 12:44:43 +020049#include "irq_remapping.h"
Varun Sethi61e015a2013-04-23 10:05:24 +053050#include "pci.h"
Joerg Roedel078e1ee2012-09-26 12:44:43 +020051
Fenghua Yu5b6985c2008-10-16 18:02:32 -070052#define ROOT_SIZE VTD_PAGE_SIZE
53#define CONTEXT_SIZE VTD_PAGE_SIZE
54
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070057#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070058
59#define IOAPIC_RANGE_START (0xfee00000)
60#define IOAPIC_RANGE_END (0xfeefffff)
61#define IOVA_START_ADDR (0x1000)
62
63#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
64
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070065#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080066#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070067
David Woodhouse2ebe3152009-09-19 07:34:04 -070068#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
69#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
70
71/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
72 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
73#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
74 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
75#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
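/*
 * Worked example with the default 48-bit guest address width:
 * __DOMAIN_MAX_PFN(48) = 2^36 - 1 = 0xfffffffff, so DOMAIN_MAX_ADDR(48)
 * is 0xfffffffff000.  On a 32-bit kernel the min_t() above caps
 * DOMAIN_MAX_PFN() at ULONG_MAX so PFNs still fit in an unsigned long.
 */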
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070076
Mark McLoughlinf27be032008-11-20 15:49:43 +000077#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070078#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070079#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080080
Andrew Mortondf08cdc2010-09-22 13:05:11 -070081/* page table handling */
82#define LEVEL_STRIDE (9)
83#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
84
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020085/*
 86 * This bitmap is used to advertise the page sizes our hardware supports
87 * to the IOMMU core, which will then use this information to split
88 * physically contiguous memory regions it is mapping into page sizes
89 * that we support.
90 *
91 * Traditionally the IOMMU core just handed us the mappings directly,
92 * after making sure the size is an order of a 4KiB page and that the
93 * mapping has natural alignment.
94 *
95 * To retain this behavior, we currently advertise that we support
96 * all page sizes that are an order of 4KiB.
97 *
98 * If at some point we'd like to utilize the IOMMU core's new behavior,
99 * we could change this to advertise the real page sizes we support.
100 */
101#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
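/*
 * ~0xFFFUL leaves every bit from 12 upwards set, so 4KiB (bit 12),
 * 8KiB (bit 13), 2MiB (bit 21), 1GiB (bit 30) and every other
 * power-of-two size >= 4KiB is advertised, while bits 0-11 stay clear.
 */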
102
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700103static inline int agaw_to_level(int agaw)
104{
105 return agaw + 2;
106}
107
108static inline int agaw_to_width(int agaw)
109{
Jiang Liu5c645b32014-01-06 14:18:12 +0800110 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700111}
112
113static inline int width_to_agaw(int width)
114{
Jiang Liu5c645b32014-01-06 14:18:12 +0800115 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700116}
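/*
 * Example: with the default 48-bit address width, width_to_agaw(48) = 2,
 * agaw_to_width(2) = 30 + 2 * 9 = 48 bits and agaw_to_level(2) = 4,
 * i.e. a 4-level page table.  agaw 4 would evaluate to 66 bits and is
 * therefore clamped to MAX_AGAW_WIDTH (64).
 */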
117
118static inline unsigned int level_to_offset_bits(int level)
119{
120 return (level - 1) * LEVEL_STRIDE;
121}
122
123static inline int pfn_level_offset(unsigned long pfn, int level)
124{
125 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
126}
127
128static inline unsigned long level_mask(int level)
129{
130 return -1UL << level_to_offset_bits(level);
131}
132
133static inline unsigned long level_size(int level)
134{
135 return 1UL << level_to_offset_bits(level);
136}
137
138static inline unsigned long align_to_level(unsigned long pfn, int level)
139{
140 return (pfn + level_size(level) - 1) & level_mask(level);
141}
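/*
 * Example: at level 2 each entry covers level_size(2) = 512 PFNs, so
 * align_to_level(0x1234, 2) rounds the PFN up to 0x1400 and
 * pfn_level_offset(0x1234, 2) selects slot 0x9 of that page table.
 */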
David Woodhousefd18de52009-05-10 23:57:41 +0100142
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100143static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
144{
Jiang Liu5c645b32014-01-06 14:18:12 +0800145 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100146}
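/*
 * e.g. lvl_to_nr_pages(2) = 2^9 = 512 4KiB pages (one 2MiB superpage),
 * lvl_to_nr_pages(3) = 2^18 pages (1GiB); the shift is capped at
 * MAX_AGAW_PFN_WIDTH for the topmost levels.
 */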
147
David Woodhousedd4e8312009-06-27 16:21:20 +0100148/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
149 are never going to work. */
150static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
151{
152 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
153}
154
155static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
156{
157 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
158}
159static inline unsigned long page_to_dma_pfn(struct page *pg)
160{
161 return mm_to_dma_pfn(page_to_pfn(pg));
162}
163static inline unsigned long virt_to_dma_pfn(void *p)
164{
165 return page_to_dma_pfn(virt_to_page(p));
166}
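/*
 * With 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12), as on x86,
 * these conversions are identities; they only shift when the kernel
 * page size is larger than the 4KiB VT-d page size.
 */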
167
Weidong Hand9630fe2008-12-08 11:06:32 +0800168/* global iommu list, set NULL for ignored DMAR units */
169static struct intel_iommu **g_iommus;
170
David Woodhousee0fc7e02009-09-30 09:12:17 -0700171static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000172static int rwbf_quirk;
173
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000174/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700175 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
176 * (used when kernel is launched w/ TXT)
177 */
178static int force_on = 0;
179
180/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000181 * 0: Present
182 * 1-11: Reserved
183 * 12-63: Context Ptr (12 - (haw-1))
184 * 64-127: Reserved
185 */
186struct root_entry {
187 u64 val;
188 u64 rsvd1;
189};
190#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
191static inline bool root_present(struct root_entry *root)
192{
193 return (root->val & 1);
194}
195static inline void set_root_present(struct root_entry *root)
196{
197 root->val |= 1;
198}
199static inline void set_root_value(struct root_entry *root, unsigned long value)
200{
201 root->val |= value & VTD_PAGE_MASK;
202}
203
204static inline struct context_entry *
205get_context_addr_from_root(struct root_entry *root)
206{
207 return (struct context_entry *)
208 (root_present(root)?phys_to_virt(
209 root->val & VTD_PAGE_MASK) :
210 NULL);
211}
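/*
 * The root table holds ROOT_ENTRY_NR = 4096 / 16 = 256 entries, one per
 * PCI bus number; each present entry points to a context table with one
 * context_entry per devfn on that bus.
 */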
212
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000213/*
214 * low 64 bits:
215 * 0: present
216 * 1: fault processing disable
217 * 2-3: translation type
218 * 12-63: address space root
219 * high 64 bits:
220 * 0-2: address width
 221 * 3-6: avail
222 * 8-23: domain id
223 */
224struct context_entry {
225 u64 lo;
226 u64 hi;
227};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000228
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000229static inline bool context_present(struct context_entry *context)
230{
231 return (context->lo & 1);
232}
233static inline void context_set_present(struct context_entry *context)
234{
235 context->lo |= 1;
236}
237
238static inline void context_set_fault_enable(struct context_entry *context)
239{
240 context->lo &= (((u64)-1) << 2) | 1;
241}
242
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000243static inline void context_set_translation_type(struct context_entry *context,
244 unsigned long value)
245{
246 context->lo &= (((u64)-1) << 4) | 3;
247 context->lo |= (value & 3) << 2;
248}
249
250static inline void context_set_address_root(struct context_entry *context,
251 unsigned long value)
252{
253 context->lo |= value & VTD_PAGE_MASK;
254}
255
256static inline void context_set_address_width(struct context_entry *context,
257 unsigned long value)
258{
259 context->hi |= value & 7;
260}
261
262static inline void context_set_domain_id(struct context_entry *context,
263 unsigned long value)
264{
265 context->hi |= (value & ((1 << 16) - 1)) << 8;
266}
267
268static inline void context_clear_entry(struct context_entry *context)
269{
270 context->lo = 0;
271 context->hi = 0;
272}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000273
Mark McLoughlin622ba122008-11-20 15:49:46 +0000274/*
275 * 0: readable
276 * 1: writable
277 * 2-6: reserved
278 * 7: super page
Sheng Yang9cf06692009-03-18 15:33:07 +0800279 * 8-10: available
280 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000281 * 12-63: Host physical address
282 */
283struct dma_pte {
284 u64 val;
285};
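/*
 * Bit 7 marks a leaf superpage entry: a level-2 entry then maps a 2MiB
 * region and a level-3 entry a 1GiB region, matching lvl_to_nr_pages()
 * above.
 */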
Mark McLoughlin622ba122008-11-20 15:49:46 +0000286
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000287static inline void dma_clear_pte(struct dma_pte *pte)
288{
289 pte->val = 0;
290}
291
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000292static inline u64 dma_pte_addr(struct dma_pte *pte)
293{
David Woodhousec85994e2009-07-01 19:21:24 +0100294#ifdef CONFIG_64BIT
295 return pte->val & VTD_PAGE_MASK;
296#else
297 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100298 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100299#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000300}
301
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000302static inline bool dma_pte_present(struct dma_pte *pte)
303{
304 return (pte->val & 3) != 0;
305}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000306
Allen Kay4399c8b2011-10-14 12:32:46 -0700307static inline bool dma_pte_superpage(struct dma_pte *pte)
308{
309 return (pte->val & (1 << 7));
310}
311
David Woodhouse75e6bf92009-07-02 11:21:16 +0100312static inline int first_pte_in_page(struct dma_pte *pte)
313{
314 return !((unsigned long)pte & ~VTD_PAGE_MASK);
315}
316
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700317/*
 318 * This domain is a static identity mapping domain.
 319 * 1. This domain creates a static 1:1 mapping to all usable memory.
 320 * 2. It maps to each iommu if successful.
 321 * 3. Each iommu maps to this domain if successful.
322 */
David Woodhouse19943b02009-08-04 16:19:20 +0100323static struct dmar_domain *si_domain;
324static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700325
Weidong Han3b5410e2008-12-08 09:17:15 +0800326/* devices under the same p2p bridge are owned in one domain */
Mike Daycdc7b832008-12-12 17:16:30 +0100327#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
Weidong Han3b5410e2008-12-08 09:17:15 +0800328
Weidong Han1ce28fe2008-12-08 16:35:39 +0800329/* domain represents a virtual machine; more than one device
330 * across iommus may be owned in one domain, e.g. kvm guest.
331 */
332#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
333
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700334/* si_domain contains multiple devices */
335#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
336
Mike Travis1b198bb2012-03-05 15:05:16 -0800337/* define the limit of IOMMUs supported in each domain */
338#ifdef CONFIG_X86
339# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
340#else
341# define IOMMU_UNITS_SUPPORTED 64
342#endif
343
Mark McLoughlin99126f72008-11-20 15:49:47 +0000344struct dmar_domain {
345 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700346 int nid; /* node id */
Mike Travis1b198bb2012-03-05 15:05:16 -0800347 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
 348 /* bitmap of iommus this domain uses */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000349
350 struct list_head devices; /* all devices' list */
351 struct iova_domain iovad; /* iova's that belong to this domain */
352
353 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000354 int gaw; /* max guest address width */
355
356 /* adjusted guest address width, 0 is level 2 30-bit */
357 int agaw;
358
Weidong Han3b5410e2008-12-08 09:17:15 +0800359 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800360
361 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800362 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800363 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100364 int iommu_superpage;/* Level of superpages supported:
365 0 == 4KiB (no superpages), 1 == 2MiB,
366 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800367 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800368 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000369};
370
Mark McLoughlina647dac2008-11-20 15:49:48 +0000371/* PCI domain-device relationship */
372struct device_domain_info {
373 struct list_head link; /* link to domain siblings */
374 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100375 int segment; /* PCI domain */
376 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000377 u8 devfn; /* PCI devfn number */
Stefan Assmann45e829e2009-12-03 06:49:24 -0500378 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800379 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000380 struct dmar_domain *domain; /* pointer to domain */
381};
382
Jiang Liub94e4112014-02-19 14:07:25 +0800383struct dmar_rmrr_unit {
384 struct list_head list; /* list of rmrr units */
385 struct acpi_dmar_header *hdr; /* ACPI header */
386 u64 base_address; /* reserved base address*/
387 u64 end_address; /* reserved end address */
Jiang Liu0e242612014-02-19 14:07:34 +0800388 struct pci_dev __rcu **devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800389 int devices_cnt; /* target device count */
390};
391
392struct dmar_atsr_unit {
393 struct list_head list; /* list of ATSR units */
394 struct acpi_dmar_header *hdr; /* ACPI header */
Jiang Liu0e242612014-02-19 14:07:34 +0800395 struct pci_dev __rcu **devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800396 int devices_cnt; /* target device count */
397 u8 include_all:1; /* include all ports */
398};
399
400static LIST_HEAD(dmar_atsr_units);
401static LIST_HEAD(dmar_rmrr_units);
402
403#define for_each_rmrr_units(rmrr) \
404 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
405
mark gross5e0d2a62008-03-04 15:22:08 -0800406static void flush_unmaps_timeout(unsigned long data);
407
Jiang Liub707cb02014-01-06 14:18:26 +0800408static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800409
mark gross80b20dd2008-04-18 13:53:58 -0700410#define HIGH_WATER_MARK 250
411struct deferred_flush_tables {
412 int next;
413 struct iova *iova[HIGH_WATER_MARK];
414 struct dmar_domain *domain[HIGH_WATER_MARK];
415};
416
417static struct deferred_flush_tables *deferred_flush;
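/*
 * IOVAs freed by the DMA unmap path are queued in these per-IOMMU tables
 * and released in batches, either once HIGH_WATER_MARK entries have
 * accumulated or when unmap_timer fires; intel_iommu_strict bypasses the
 * batching and flushes the IOTLB on every unmap.
 */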
418
mark gross5e0d2a62008-03-04 15:22:08 -0800419/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800420static int g_num_of_iommus;
421
422static DEFINE_SPINLOCK(async_umap_flush_lock);
423static LIST_HEAD(unmaps_to_do);
424
425static int timer_on;
426static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800427
Jiang Liu92d03cc2014-02-19 14:07:28 +0800428static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700429static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800430static void domain_remove_one_dev_info(struct dmar_domain *domain,
431 struct pci_dev *pdev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800432static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
433 struct pci_dev *pdev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700434
Suresh Siddhad3f13812011-08-23 17:05:25 -0700435#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800436int dmar_disabled = 0;
437#else
438int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700439#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800440
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200441int intel_iommu_enabled = 0;
442EXPORT_SYMBOL_GPL(intel_iommu_enabled);
443
David Woodhouse2d9e6672010-06-15 10:57:57 +0100444static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700445static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800446static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100447static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700448
David Woodhousec0771df2011-10-14 20:59:46 +0100449int intel_iommu_gfx_mapped;
450EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
451
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700452#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
453static DEFINE_SPINLOCK(device_domain_lock);
454static LIST_HEAD(device_domain_list);
455
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100456static struct iommu_ops intel_iommu_ops;
457
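/*
 * Parses the "intel_iommu=" boot parameter; options can be combined,
 * e.g. intel_iommu=on,strict,sp_off enables the IOMMU, disables batched
 * IOTLB flushing and turns off superpage use.
 */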
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700458static int __init intel_iommu_setup(char *str)
459{
460 if (!str)
461 return -EINVAL;
462 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800463 if (!strncmp(str, "on", 2)) {
464 dmar_disabled = 0;
465 printk(KERN_INFO "Intel-IOMMU: enabled\n");
466 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700467 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800468 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700469 } else if (!strncmp(str, "igfx_off", 8)) {
470 dmar_map_gfx = 0;
471 printk(KERN_INFO
472 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700473 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800474 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700475 "Intel-IOMMU: Forcing DAC for PCI devices\n");
476 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800477 } else if (!strncmp(str, "strict", 6)) {
478 printk(KERN_INFO
479 "Intel-IOMMU: disable batched IOTLB flush\n");
480 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100481 } else if (!strncmp(str, "sp_off", 6)) {
482 printk(KERN_INFO
483 "Intel-IOMMU: disable supported super page\n");
484 intel_iommu_superpage = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700485 }
486
487 str += strcspn(str, ",");
488 while (*str == ',')
489 str++;
490 }
491 return 0;
492}
493__setup("intel_iommu=", intel_iommu_setup);
494
495static struct kmem_cache *iommu_domain_cache;
496static struct kmem_cache *iommu_devinfo_cache;
497static struct kmem_cache *iommu_iova_cache;
498
Suresh Siddha4c923d42009-10-02 11:01:24 -0700499static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700500{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700501 struct page *page;
502 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700503
Suresh Siddha4c923d42009-10-02 11:01:24 -0700504 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
505 if (page)
506 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700507 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700508}
509
510static inline void free_pgtable_page(void *vaddr)
511{
512 free_page((unsigned long)vaddr);
513}
514
515static inline void *alloc_domain_mem(void)
516{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900517 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700518}
519
Kay, Allen M38717942008-09-09 18:37:29 +0300520static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700521{
522 kmem_cache_free(iommu_domain_cache, vaddr);
523}
524
525static inline void * alloc_devinfo_mem(void)
526{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900527 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700528}
529
530static inline void free_devinfo_mem(void *vaddr)
531{
532 kmem_cache_free(iommu_devinfo_cache, vaddr);
533}
534
535struct iova *alloc_iova_mem(void)
536{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900537 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700538}
539
540void free_iova_mem(struct iova *iova)
541{
542 kmem_cache_free(iommu_iova_cache, iova);
543}
544
Weidong Han1b573682008-12-08 15:34:06 +0800545
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700546static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800547{
548 unsigned long sagaw;
549 int agaw = -1;
550
551 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700552 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800553 agaw >= 0; agaw--) {
554 if (test_bit(agaw, &sagaw))
555 break;
556 }
557
558 return agaw;
559}
560
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700561/*
562 * Calculate max SAGAW for each iommu.
563 */
564int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
565{
566 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
567}
568
569/*
570 * calculate agaw for each iommu.
571 * "SAGAW" may be different across iommus, use a default agaw, and
572 * get a supported less agaw for iommus that don't support the default agaw.
573 */
574int iommu_calculate_agaw(struct intel_iommu *iommu)
575{
576 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
577}
578
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700579/* This function only returns a single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800580static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
581{
582 int iommu_id;
583
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700584 /* si_domain and vm domain should not get here. */
Weidong Han1ce28fe2008-12-08 16:35:39 +0800585 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700586 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
Weidong Han1ce28fe2008-12-08 16:35:39 +0800587
Mike Travis1b198bb2012-03-05 15:05:16 -0800588 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800589 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
590 return NULL;
591
592 return g_iommus[iommu_id];
593}
594
Weidong Han8e6040972008-12-08 15:49:06 +0800595static void domain_update_iommu_coherency(struct dmar_domain *domain)
596{
597 int i;
598
Alex Williamson2e12bc22011-11-11 17:26:44 -0700599 i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
600
601 domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800602
Mike Travis1b198bb2012-03-05 15:05:16 -0800603 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Weidong Han8e6040972008-12-08 15:49:06 +0800604 if (!ecap_coherent(g_iommus[i]->ecap)) {
605 domain->iommu_coherency = 0;
606 break;
607 }
Weidong Han8e6040972008-12-08 15:49:06 +0800608 }
609}
610
Sheng Yang58c610b2009-03-18 15:33:05 +0800611static void domain_update_iommu_snooping(struct dmar_domain *domain)
612{
613 int i;
614
615 domain->iommu_snooping = 1;
616
Mike Travis1b198bb2012-03-05 15:05:16 -0800617 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Sheng Yang58c610b2009-03-18 15:33:05 +0800618 if (!ecap_sc_support(g_iommus[i]->ecap)) {
619 domain->iommu_snooping = 0;
620 break;
621 }
Sheng Yang58c610b2009-03-18 15:33:05 +0800622 }
623}
624
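/*
 * Example: if one active IOMMU reports 2MiB+1GiB superpage support
 * (cap_super_page_val() == 0x3) and another only 2MiB (0x1), the mask
 * below ends up as 0x1 and fls() sets iommu_superpage to 1 (2MiB);
 * a unit with no superpage support zeroes the mask, leaving the domain
 * at 4KiB pages only.
 */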
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100625static void domain_update_iommu_superpage(struct dmar_domain *domain)
626{
Allen Kay8140a952011-10-14 12:32:17 -0700627 struct dmar_drhd_unit *drhd;
628 struct intel_iommu *iommu = NULL;
629 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100630
631 if (!intel_iommu_superpage) {
632 domain->iommu_superpage = 0;
633 return;
634 }
635
Allen Kay8140a952011-10-14 12:32:17 -0700636 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800637 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700638 for_each_active_iommu(iommu, drhd) {
639 mask &= cap_super_page_val(iommu->cap);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100640 if (!mask) {
641 break;
642 }
643 }
Jiang Liu0e242612014-02-19 14:07:34 +0800644 rcu_read_unlock();
645
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100646 domain->iommu_superpage = fls(mask);
647}
648
Sheng Yang58c610b2009-03-18 15:33:05 +0800649/* Some capabilities may be different across iommus */
650static void domain_update_iommu_cap(struct dmar_domain *domain)
651{
652 domain_update_iommu_coherency(domain);
653 domain_update_iommu_snooping(domain);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100654 domain_update_iommu_superpage(domain);
Sheng Yang58c610b2009-03-18 15:33:05 +0800655}
656
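/*
 * Example: for a hypothetical device at 0000:03:00.0 this walks every
 * active DRHD unit in segment 0 and matches either the device itself,
 * a bridge whose secondary bus range contains bus 3, or an INCLUDE_ALL
 * unit; it returns the intel_iommu covering the device, or NULL.
 */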
David Woodhouse276dbf992009-04-04 01:45:37 +0100657static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800658{
659 struct dmar_drhd_unit *drhd = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800660 struct intel_iommu *iommu;
661 struct pci_dev *dev;
Weidong Hanc7151a82008-12-08 22:51:37 +0800662 int i;
663
Jiang Liu0e242612014-02-19 14:07:34 +0800664 rcu_read_lock();
Jiang Liub683b232014-02-19 14:07:32 +0800665 for_each_active_iommu(iommu, drhd) {
David Woodhouse276dbf992009-04-04 01:45:37 +0100666 if (segment != drhd->segment)
667 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800668
Jiang Liub683b232014-02-19 14:07:32 +0800669 for_each_active_dev_scope(drhd->devices,
670 drhd->devices_cnt, i, dev) {
671 if (dev->bus->number == bus && dev->devfn == devfn)
672 goto out;
673 if (dev->subordinate &&
674 dev->subordinate->number <= bus &&
675 dev->subordinate->busn_res.end >= bus)
676 goto out;
David Woodhouse924b6232009-04-04 00:39:25 +0100677 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800678
679 if (drhd->include_all)
Jiang Liub683b232014-02-19 14:07:32 +0800680 goto out;
Weidong Hanc7151a82008-12-08 22:51:37 +0800681 }
Jiang Liub683b232014-02-19 14:07:32 +0800682 iommu = NULL;
683out:
Jiang Liu0e242612014-02-19 14:07:34 +0800684 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800685
Jiang Liub683b232014-02-19 14:07:32 +0800686 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800687}
688
Weidong Han5331fe62008-12-08 23:00:00 +0800689static void domain_flush_cache(struct dmar_domain *domain,
690 void *addr, int size)
691{
692 if (!domain->iommu_coherency)
693 clflush_cache_range(addr, size);
694}
695
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700696/* Gets context entry for a given bus and devfn */
697static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
698 u8 bus, u8 devfn)
699{
700 struct root_entry *root;
701 struct context_entry *context;
702 unsigned long phy_addr;
703 unsigned long flags;
704
705 spin_lock_irqsave(&iommu->lock, flags);
706 root = &iommu->root_entry[bus];
707 context = get_context_addr_from_root(root);
708 if (!context) {
Suresh Siddha4c923d42009-10-02 11:01:24 -0700709 context = (struct context_entry *)
710 alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700711 if (!context) {
712 spin_unlock_irqrestore(&iommu->lock, flags);
713 return NULL;
714 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700715 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700716 phy_addr = virt_to_phys((void *)context);
717 set_root_value(root, phy_addr);
718 set_root_present(root);
719 __iommu_flush_cache(iommu, root, sizeof(*root));
720 }
721 spin_unlock_irqrestore(&iommu->lock, flags);
722 return &context[devfn];
723}
724
725static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
726{
727 struct root_entry *root;
728 struct context_entry *context;
729 int ret;
730 unsigned long flags;
731
732 spin_lock_irqsave(&iommu->lock, flags);
733 root = &iommu->root_entry[bus];
734 context = get_context_addr_from_root(root);
735 if (!context) {
736 ret = 0;
737 goto out;
738 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000739 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700740out:
741 spin_unlock_irqrestore(&iommu->lock, flags);
742 return ret;
743}
744
745static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
746{
747 struct root_entry *root;
748 struct context_entry *context;
749 unsigned long flags;
750
751 spin_lock_irqsave(&iommu->lock, flags);
752 root = &iommu->root_entry[bus];
753 context = get_context_addr_from_root(root);
754 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000755 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700756 __iommu_flush_cache(iommu, &context[devfn], \
757 sizeof(*context));
758 }
759 spin_unlock_irqrestore(&iommu->lock, flags);
760}
761
762static void free_context_table(struct intel_iommu *iommu)
763{
764 struct root_entry *root;
765 int i;
766 unsigned long flags;
767 struct context_entry *context;
768
769 spin_lock_irqsave(&iommu->lock, flags);
770 if (!iommu->root_entry) {
771 goto out;
772 }
773 for (i = 0; i < ROOT_ENTRY_NR; i++) {
774 root = &iommu->root_entry[i];
775 context = get_context_addr_from_root(root);
776 if (context)
777 free_pgtable_page(context);
778 }
779 free_pgtable_page(iommu->root_entry);
780 iommu->root_entry = NULL;
781out:
782 spin_unlock_irqrestore(&iommu->lock, flags);
783}
784
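/*
 * Walk (and, if needed, allocate) the page table down to target_level
 * and return the PTE for @pfn.  target_level == 0 means "find the lowest
 * existing entry": the walk stops at the first superpage or non-present
 * PTE instead of allocating further levels.
 */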
David Woodhouseb026fd22009-06-28 10:37:25 +0100785static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
Allen Kay4399c8b2011-10-14 12:32:46 -0700786 unsigned long pfn, int target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700787{
David Woodhouseb026fd22009-06-28 10:37:25 +0100788 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700789 struct dma_pte *parent, *pte = NULL;
790 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700791 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700792
793 BUG_ON(!domain->pgd);
Julian Stecklinaf9423602013-10-09 10:03:52 +0200794
795 if (addr_width < BITS_PER_LONG && pfn >> addr_width)
796 /* Address beyond IOMMU's addressing capabilities. */
797 return NULL;
798
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700799 parent = domain->pgd;
800
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700801 while (level > 0) {
802 void *tmp_page;
803
David Woodhouseb026fd22009-06-28 10:37:25 +0100804 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700805 pte = &parent[offset];
Allen Kay4399c8b2011-10-14 12:32:46 -0700806 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100807 break;
808 if (level == target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700809 break;
810
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000811 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100812 uint64_t pteval;
813
Suresh Siddha4c923d42009-10-02 11:01:24 -0700814 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700815
David Woodhouse206a73c2009-07-01 19:30:28 +0100816 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700817 return NULL;
David Woodhouse206a73c2009-07-01 19:30:28 +0100818
David Woodhousec85994e2009-07-01 19:21:24 +0100819 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400820 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
David Woodhousec85994e2009-07-01 19:21:24 +0100821 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
822 /* Someone else set it while we were thinking; use theirs. */
823 free_pgtable_page(tmp_page);
824 } else {
825 dma_pte_addr(pte);
826 domain_flush_cache(domain, pte, sizeof(*pte));
827 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700828 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000829 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700830 level--;
831 }
832
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700833 return pte;
834}
835
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100836
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700837/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100838static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
839 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100840 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700841{
842 struct dma_pte *parent, *pte = NULL;
843 int total = agaw_to_level(domain->agaw);
844 int offset;
845
846 parent = domain->pgd;
847 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100848 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700849 pte = &parent[offset];
850 if (level == total)
851 return pte;
852
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100853 if (!dma_pte_present(pte)) {
854 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700855 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100856 }
857
858 if (pte->val & DMA_PTE_LARGE_PAGE) {
859 *large_page = total;
860 return pte;
861 }
862
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000863 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700864 total--;
865 }
866 return NULL;
867}
868
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700869/* clear last level pte, a tlb flush should follow */
Allen Kay292827c2011-10-14 12:31:54 -0700870static int dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100871 unsigned long start_pfn,
872 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700873{
David Woodhouse04b18e62009-06-27 19:15:01 +0100874 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100875 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100876 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700877
David Woodhouse04b18e62009-06-27 19:15:01 +0100878 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
David Woodhouse595badf2009-06-27 22:09:11 +0100879 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700880 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100881
David Woodhouse04b18e62009-06-27 19:15:01 +0100882 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700883 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100884 large_page = 1;
885 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100886 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100887 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100888 continue;
889 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100890 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100891 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100892 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100893 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100894 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
895
David Woodhouse310a5ab2009-06-28 18:52:20 +0100896 domain_flush_cache(domain, first_pte,
897 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700898
899 } while (start_pfn && start_pfn <= last_pfn);
Allen Kay292827c2011-10-14 12:31:54 -0700900
Jiang Liu5c645b32014-01-06 14:18:12 +0800901 return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700902}
903
Alex Williamson3269ee02013-06-15 10:27:19 -0600904static void dma_pte_free_level(struct dmar_domain *domain, int level,
905 struct dma_pte *pte, unsigned long pfn,
906 unsigned long start_pfn, unsigned long last_pfn)
907{
908 pfn = max(start_pfn, pfn);
909 pte = &pte[pfn_level_offset(pfn, level)];
910
911 do {
912 unsigned long level_pfn;
913 struct dma_pte *level_pte;
914
915 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
916 goto next;
917
918 level_pfn = pfn & level_mask(level - 1);
919 level_pte = phys_to_virt(dma_pte_addr(pte));
920
921 if (level > 2)
922 dma_pte_free_level(domain, level - 1, level_pte,
923 level_pfn, start_pfn, last_pfn);
924
925 /* If range covers entire pagetable, free it */
926 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800927 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600928 dma_clear_pte(pte);
929 domain_flush_cache(domain, pte, sizeof(*pte));
930 free_pgtable_page(level_pte);
931 }
932next:
933 pfn += level_size(level);
934 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
935}
936
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700937/* free page table pages. last level pte should already be cleared */
938static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100939 unsigned long start_pfn,
940 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700941{
David Woodhouse6660c632009-06-27 22:41:00 +0100942 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700943
David Woodhouse6660c632009-06-27 22:41:00 +0100944 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
945 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700946 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700947
David Woodhousef3a0a522009-06-30 03:40:07 +0100948 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -0600949 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
950 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +0100951
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700952 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100953 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700954 free_pgtable_page(domain->pgd);
955 domain->pgd = NULL;
956 }
957}
958
959/* iommu handling */
960static int iommu_alloc_root_entry(struct intel_iommu *iommu)
961{
962 struct root_entry *root;
963 unsigned long flags;
964
Suresh Siddha4c923d42009-10-02 11:01:24 -0700965 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700966 if (!root)
967 return -ENOMEM;
968
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700969 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700970
971 spin_lock_irqsave(&iommu->lock, flags);
972 iommu->root_entry = root;
973 spin_unlock_irqrestore(&iommu->lock, flags);
974
975 return 0;
976}
977
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700978static void iommu_set_root_entry(struct intel_iommu *iommu)
979{
980 void *addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100981 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700982 unsigned long flag;
983
984 addr = iommu->root_entry;
985
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200986 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700987 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
988
David Woodhousec416daa2009-05-10 20:30:58 +0100989 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700990
991 /* Make sure hardware complete it */
992 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100993 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700994
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200995 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700996}
997
998static void iommu_flush_write_buffer(struct intel_iommu *iommu)
999{
1000 u32 val;
1001 unsigned long flag;
1002
David Woodhouse9af88142009-02-13 23:18:03 +00001003 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001004 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001005
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001006 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001007 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001008
1009 /* Make sure hardware complete it */
1010 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001011 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001012
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001013 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001014}
1015
 1016/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001017static void __iommu_flush_context(struct intel_iommu *iommu,
1018 u16 did, u16 source_id, u8 function_mask,
1019 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001020{
1021 u64 val = 0;
1022 unsigned long flag;
1023
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001024 switch (type) {
1025 case DMA_CCMD_GLOBAL_INVL:
1026 val = DMA_CCMD_GLOBAL_INVL;
1027 break;
1028 case DMA_CCMD_DOMAIN_INVL:
1029 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1030 break;
1031 case DMA_CCMD_DEVICE_INVL:
1032 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1033 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1034 break;
1035 default:
1036 BUG();
1037 }
1038 val |= DMA_CCMD_ICC;
1039
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001040 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001041 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1042
1043 /* Make sure hardware complete it */
1044 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1045 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1046
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001047 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001048}
1049
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001050/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001051static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1052 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001053{
1054 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1055 u64 val = 0, val_iva = 0;
1056 unsigned long flag;
1057
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001058 switch (type) {
1059 case DMA_TLB_GLOBAL_FLUSH:
1060 /* global flush doesn't need set IVA_REG */
1061 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1062 break;
1063 case DMA_TLB_DSI_FLUSH:
1064 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1065 break;
1066 case DMA_TLB_PSI_FLUSH:
1067 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1068 /* Note: always flush non-leaf currently */
1069 val_iva = size_order | addr;
1070 break;
1071 default:
1072 BUG();
1073 }
1074 /* Note: set drain read/write */
1075#if 0
1076 /*
1077 * This is probably to be super secure.. Looks like we can
1078 * ignore it without any impact.
1079 */
1080 if (cap_read_drain(iommu->cap))
1081 val |= DMA_TLB_READ_DRAIN;
1082#endif
1083 if (cap_write_drain(iommu->cap))
1084 val |= DMA_TLB_WRITE_DRAIN;
1085
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001086 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001087 /* Note: Only uses first TLB reg currently */
1088 if (val_iva)
1089 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1090 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1091
1092 /* Make sure hardware complete it */
1093 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1094 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1095
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001096 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001097
1098 /* check IOTLB invalidation granularity */
1099 if (DMA_TLB_IAIG(val) == 0)
1100 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1101 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1102 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001103 (unsigned long long)DMA_TLB_IIRG(type),
1104 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001105}
1106
Yu Zhao93a23a72009-05-18 13:51:37 +08001107static struct device_domain_info *iommu_support_dev_iotlb(
1108 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001109{
Yu Zhao93a23a72009-05-18 13:51:37 +08001110 int found = 0;
1111 unsigned long flags;
1112 struct device_domain_info *info;
1113 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1114
1115 if (!ecap_dev_iotlb_support(iommu->ecap))
1116 return NULL;
1117
1118 if (!iommu->qi)
1119 return NULL;
1120
1121 spin_lock_irqsave(&device_domain_lock, flags);
1122 list_for_each_entry(info, &domain->devices, link)
1123 if (info->bus == bus && info->devfn == devfn) {
1124 found = 1;
1125 break;
1126 }
1127 spin_unlock_irqrestore(&device_domain_lock, flags);
1128
1129 if (!found || !info->dev)
1130 return NULL;
1131
1132 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1133 return NULL;
1134
1135 if (!dmar_find_matched_atsr_unit(info->dev))
1136 return NULL;
1137
1138 info->iommu = iommu;
1139
1140 return info;
1141}
1142
1143static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1144{
1145 if (!info)
1146 return;
1147
1148 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1149}
1150
1151static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1152{
1153 if (!info->dev || !pci_ats_enabled(info->dev))
1154 return;
1155
1156 pci_disable_ats(info->dev);
1157}
1158
1159static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1160 u64 addr, unsigned mask)
1161{
1162 u16 sid, qdep;
1163 unsigned long flags;
1164 struct device_domain_info *info;
1165
1166 spin_lock_irqsave(&device_domain_lock, flags);
1167 list_for_each_entry(info, &domain->devices, link) {
1168 if (!info->dev || !pci_ats_enabled(info->dev))
1169 continue;
1170
1171 sid = info->bus << 8 | info->devfn;
1172 qdep = pci_ats_queue_depth(info->dev);
1173 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1174 }
1175 spin_unlock_irqrestore(&device_domain_lock, flags);
1176}
1177
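/*
 * Example: flushing 3 pages gives mask = ilog2(__roundup_pow_of_two(3)) = 2,
 * i.e. a 4-page PSI covering the request; if mask exceeds
 * cap_max_amask_val() the code falls back to a domain-selective flush.
 */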
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001178static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
Nadav Amit82653632010-04-01 13:24:40 +03001179 unsigned long pfn, unsigned int pages, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001180{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001181 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001182 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001183
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001184 BUG_ON(pages == 0);
1185
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001186 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001187 * Fall back to domain selective flush if no PSI support or the size is
1188 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001189 * PSI requires page size to be 2 ^ x, and the base address is naturally
1190 * aligned to the size
1191 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001192 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1193 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001194 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001195 else
1196 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1197 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001198
1199 /*
Nadav Amit82653632010-04-01 13:24:40 +03001200 * In caching mode, changes of pages from non-present to present require
1201 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001202 */
Nadav Amit82653632010-04-01 13:24:40 +03001203 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001204 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001205}
1206
mark grossf8bab732008-02-08 04:18:38 -08001207static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1208{
1209 u32 pmen;
1210 unsigned long flags;
1211
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001212 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001213 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1214 pmen &= ~DMA_PMEN_EPM;
1215 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1216
1217 /* wait for the protected region status bit to clear */
1218 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1219 readl, !(pmen & DMA_PMEN_PRS), pmen);
1220
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001221 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001222}
1223
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001224static int iommu_enable_translation(struct intel_iommu *iommu)
1225{
1226 u32 sts;
1227 unsigned long flags;
1228
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001229 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001230 iommu->gcmd |= DMA_GCMD_TE;
1231 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001232
1233 /* Make sure hardware complete it */
1234 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001235 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001236
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001237 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001238 return 0;
1239}
1240
1241static int iommu_disable_translation(struct intel_iommu *iommu)
1242{
1243 u32 sts;
1244 unsigned long flag;
1245
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001246 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001247 iommu->gcmd &= ~DMA_GCMD_TE;
1248 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1249
1250 /* Make sure hardware complete it */
1251 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001252 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001253
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001254 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001255 return 0;
1256}
1257
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001258
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001259static int iommu_init_domains(struct intel_iommu *iommu)
1260{
1261 unsigned long ndomains;
1262 unsigned long nlongs;
1263
1264 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001265 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1266 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001267 nlongs = BITS_TO_LONGS(ndomains);
1268
Donald Dutile94a91b52009-08-20 16:51:34 -04001269 spin_lock_init(&iommu->lock);
1270
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001271 /* TBD: there might be 64K domains,
1272 * consider other allocation for future chip
1273 */
1274 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1275 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001276 pr_err("IOMMU%d: allocating domain id array failed\n",
1277 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001278 return -ENOMEM;
1279 }
1280 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1281 GFP_KERNEL);
1282 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001283 pr_err("IOMMU%d: allocating domain array failed\n",
1284 iommu->seq_id);
1285 kfree(iommu->domain_ids);
1286 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001287 return -ENOMEM;
1288 }
1289
1290 /*
1291 * if Caching mode is set, then invalid translations are tagged
1292 * with domainid 0. Hence we need to pre-allocate it.
1293 */
1294 if (cap_caching_mode(iommu->cap))
1295 set_bit(0, iommu->domain_ids);
1296 return 0;
1297}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001298
Jiang Liua868e6b2014-01-06 14:18:20 +08001299static void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300{
1301 struct dmar_domain *domain;
Jiang Liu5ced12a2014-01-06 14:18:22 +08001302 int i, count;
Weidong Hanc7151a82008-12-08 22:51:37 +08001303 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001304
Donald Dutile94a91b52009-08-20 16:51:34 -04001305 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001306 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001307 /*
1308 * Domain id 0 is reserved for invalid translation
1309 * if hardware supports caching mode.
1310 */
1311 if (cap_caching_mode(iommu->cap) && i == 0)
1312 continue;
1313
Donald Dutile94a91b52009-08-20 16:51:34 -04001314 domain = iommu->domains[i];
1315 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001316
Donald Dutile94a91b52009-08-20 16:51:34 -04001317 spin_lock_irqsave(&domain->iommu_lock, flags);
Jiang Liu5ced12a2014-01-06 14:18:22 +08001318 count = --domain->iommu_count;
1319 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001320 if (count == 0)
1321 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001322 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001323 }
1324
1325 if (iommu->gcmd & DMA_GCMD_TE)
1326 iommu_disable_translation(iommu);
1327
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001328 kfree(iommu->domains);
1329 kfree(iommu->domain_ids);
Jiang Liua868e6b2014-01-06 14:18:20 +08001330 iommu->domains = NULL;
1331 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001332
Weidong Hand9630fe2008-12-08 11:06:32 +08001333 g_iommus[iommu->seq_id] = NULL;
1334
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001335 /* free context mapping */
1336 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337}
1338
Jiang Liu92d03cc2014-02-19 14:07:28 +08001339static struct dmar_domain *alloc_domain(bool vm)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001340{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001341	/* domain ids for virtual machines; they are never set in a context entry */
1342 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344
1345 domain = alloc_domain_mem();
1346 if (!domain)
1347 return NULL;
1348
Suresh Siddha4c923d42009-10-02 11:01:24 -07001349 domain->nid = -1;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001350 domain->iommu_count = 0;
Mike Travis1b198bb2012-03-05 15:05:16 -08001351 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001352 domain->flags = 0;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001353 spin_lock_init(&domain->iommu_lock);
1354 INIT_LIST_HEAD(&domain->devices);
1355 if (vm) {
1356 domain->id = atomic_inc_return(&vm_domid);
1357 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1358 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001359
1360 return domain;
1361}
1362
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001363static int iommu_attach_domain(struct dmar_domain *domain,
1364 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001366 int num;
1367 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001368 unsigned long flags;
1369
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001370 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001371
1372 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001373
1374 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1375 if (num >= ndomains) {
1376 spin_unlock_irqrestore(&iommu->lock, flags);
1377 printk(KERN_ERR "IOMMU: no free domain ids\n");
1378 return -ENOMEM;
1379 }
1380
1381 domain->id = num;
Jiang Liu9ebd6822014-02-19 14:07:29 +08001382 domain->iommu_count++;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001383 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001384 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001385 iommu->domains[num] = domain;
1386 spin_unlock_irqrestore(&iommu->lock, flags);
1387
1388 return 0;
1389}
1390
1391static void iommu_detach_domain(struct dmar_domain *domain,
1392 struct intel_iommu *iommu)
1393{
1394 unsigned long flags;
1395 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001396
1397 spin_lock_irqsave(&iommu->lock, flags);
1398 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001399 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001400 if (iommu->domains[num] == domain) {
Jiang Liu92d03cc2014-02-19 14:07:28 +08001401 clear_bit(num, iommu->domain_ids);
1402 iommu->domains[num] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001403 break;
1404 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001405 }
Weidong Han8c11e792008-12-08 15:29:22 +08001406 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407}
1408
1409static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001410static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411
Joseph Cihula51a63e62011-03-21 11:04:24 -07001412static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413{
1414 struct pci_dev *pdev = NULL;
1415 struct iova *iova;
1416 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001417
David Millerf6611972008-02-06 01:36:23 -08001418 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419
Mark Gross8a443df2008-03-04 14:59:31 -08001420 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1421 &reserved_rbtree_key);
1422
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001423 /* IOAPIC ranges shouldn't be accessed by DMA */
1424 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1425 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001426 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001427 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001428 return -ENODEV;
1429 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430
1431 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1432 for_each_pci_dev(pdev) {
1433 struct resource *r;
1434
1435 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1436 r = &pdev->resource[i];
1437 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1438 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001439 iova = reserve_iova(&reserved_iova_list,
1440 IOVA_PFN(r->start),
1441 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001442 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001444 return -ENODEV;
1445 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446 }
1447 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001448 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001449}
1450
1451static void domain_reserve_special_ranges(struct dmar_domain *domain)
1452{
1453 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1454}
1455
1456static inline int guestwidth_to_adjustwidth(int gaw)
1457{
1458 int agaw;
1459 int r = (gaw - 12) % 9;
1460
1461 if (r == 0)
1462 agaw = gaw;
1463 else
1464 agaw = gaw + 9 - r;
1465 if (agaw > 64)
1466 agaw = 64;
1467 return agaw;
1468}
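/*
 * Worked example (editorial, illustrative only): a guest width of 48 gives
 * r = (48 - 12) % 9 = 0, so the width is kept as-is; a guest width of 40
 * gives r = 1, so agaw = 40 + 9 - 1 = 48.  In other words the requested
 * width is rounded up to the next width the 9-bit-per-level page tables can
 * actually express (21, 30, 39, 48, 57, ...), capped at 64.
 */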
1469
1470static int domain_init(struct dmar_domain *domain, int guest_width)
1471{
1472 struct intel_iommu *iommu;
1473 int adjust_width, agaw;
1474 unsigned long sagaw;
1475
David Millerf6611972008-02-06 01:36:23 -08001476 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477 domain_reserve_special_ranges(domain);
1478
1479 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001480 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481 if (guest_width > cap_mgaw(iommu->cap))
1482 guest_width = cap_mgaw(iommu->cap);
1483 domain->gaw = guest_width;
1484 adjust_width = guestwidth_to_adjustwidth(guest_width);
1485 agaw = width_to_agaw(adjust_width);
1486 sagaw = cap_sagaw(iommu->cap);
1487 if (!test_bit(agaw, &sagaw)) {
1488 /* hardware doesn't support it, choose a bigger one */
1489 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1490 agaw = find_next_bit(&sagaw, 5, agaw);
1491 if (agaw >= 5)
1492 return -ENODEV;
1493 }
1494 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495
Weidong Han8e6040972008-12-08 15:49:06 +08001496 if (ecap_coherent(iommu->ecap))
1497 domain->iommu_coherency = 1;
1498 else
1499 domain->iommu_coherency = 0;
1500
Sheng Yang58c610b2009-03-18 15:33:05 +08001501 if (ecap_sc_support(iommu->ecap))
1502 domain->iommu_snooping = 1;
1503 else
1504 domain->iommu_snooping = 0;
1505
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001506 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001507 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001508
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001510 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001511 if (!domain->pgd)
1512 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001513 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514 return 0;
1515}
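/*
 * Illustrative note (editorial): with guest_width == 48 the adjusted width
 * is also 48, width_to_agaw() yields bit index 2, and hardware advertising
 * 4-level tables has that bit set in cap_sagaw(), so the test above passes.
 * If only a larger AGAW were advertised (say bit 3, 57-bit/5-level),
 * find_next_bit() would bump agaw to 3 and the domain would simply carry
 * deeper page tables than the guest width strictly needs.
 */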
1516
1517static void domain_exit(struct dmar_domain *domain)
1518{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001519 struct dmar_drhd_unit *drhd;
1520 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001521
1522	/* Domain 0 is reserved, so don't process it */
1523 if (!domain)
1524 return;
1525
Alex Williamson7b668352011-05-24 12:02:41 +01001526 /* Flush any lazy unmaps that may reference this domain */
1527 if (!intel_iommu_strict)
1528 flush_unmaps_timeout(0);
1529
Jiang Liu92d03cc2014-02-19 14:07:28 +08001530 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001532
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001533 /* destroy iovas */
1534 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001535
1536 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001537 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538
1539 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001540 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001541
Jiang Liu92d03cc2014-02-19 14:07:28 +08001542 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001543 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001544 for_each_active_iommu(iommu, drhd)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001545 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1546 test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001547 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001548 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001549
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001550 free_domain_mem(domain);
1551}
1552
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001553static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1554 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555{
1556 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001557 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001558 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001559 struct dma_pte *pgd;
1560 unsigned long num;
1561 unsigned long ndomains;
1562 int id;
1563 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001564 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565
1566 pr_debug("Set context mapping for %02x:%02x.%d\n",
1567 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001568
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001569 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001570 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1571 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001572
David Woodhouse276dbf992009-04-04 01:45:37 +01001573 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001574 if (!iommu)
1575 return -ENODEV;
1576
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001577 context = device_to_context_entry(iommu, bus, devfn);
1578 if (!context)
1579 return -ENOMEM;
1580 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001581 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 spin_unlock_irqrestore(&iommu->lock, flags);
1583 return 0;
1584 }
1585
Weidong Hanea6606b2008-12-08 23:08:15 +08001586 id = domain->id;
1587 pgd = domain->pgd;
1588
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001589 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1590 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001591 int found = 0;
1592
1593 /* find an available domain id for this device in iommu */
1594 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001595 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001596 if (iommu->domains[num] == domain) {
1597 id = num;
1598 found = 1;
1599 break;
1600 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001601 }
1602
1603 if (found == 0) {
1604 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1605 if (num >= ndomains) {
1606 spin_unlock_irqrestore(&iommu->lock, flags);
1607 printk(KERN_ERR "IOMMU: no free domain ids\n");
1608 return -EFAULT;
1609 }
1610
1611 set_bit(num, iommu->domain_ids);
1612 iommu->domains[num] = domain;
1613 id = num;
1614 }
1615
1616 /* Skip top levels of page tables for
1617		 * an iommu that has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001618 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001619 */
Chris Wright1672af12009-12-02 12:06:34 -08001620 if (translation != CONTEXT_TT_PASS_THROUGH) {
1621 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1622 pgd = phys_to_virt(dma_pte_addr(pgd));
1623 if (!dma_pte_present(pgd)) {
1624 spin_unlock_irqrestore(&iommu->lock, flags);
1625 return -ENOMEM;
1626 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001627 }
1628 }
1629 }
1630
1631 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001632
Yu Zhao93a23a72009-05-18 13:51:37 +08001633 if (translation != CONTEXT_TT_PASS_THROUGH) {
1634 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1635 translation = info ? CONTEXT_TT_DEV_IOTLB :
1636 CONTEXT_TT_MULTI_LEVEL;
1637 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001638 /*
1639 * In pass through mode, AW must be programmed to indicate the largest
1640 * AGAW value supported by hardware. And ASR is ignored by hardware.
1641 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001642 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001643 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001644 else {
1645 context_set_address_root(context, virt_to_phys(pgd));
1646 context_set_address_width(context, iommu->agaw);
1647 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001648
1649 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001650 context_set_fault_enable(context);
1651 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001652 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001653
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001654 /*
1655 * It's a non-present to present mapping. If hardware doesn't cache
1656	 * non-present entries we only need to flush the write-buffer. If it
1657 * _does_ cache non-present entries, then it does so in the special
1658 * domain #0, which we have to flush:
1659 */
1660 if (cap_caching_mode(iommu->cap)) {
1661 iommu->flush.flush_context(iommu, 0,
1662 (((u16)bus) << 8) | devfn,
1663 DMA_CCMD_MASK_NOBIT,
1664 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001665 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001666 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001668 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001669 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001670 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001671
1672 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001673 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001674 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001675 if (domain->iommu_count == 1)
1676 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001677 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001678 }
1679 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001680 return 0;
1681}
1682
1683static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001684domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1685 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686{
1687 int ret;
1688 struct pci_dev *tmp, *parent;
1689
David Woodhouse276dbf992009-04-04 01:45:37 +01001690 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001691 pdev->bus->number, pdev->devfn,
1692 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693 if (ret)
1694 return ret;
1695
1696 /* dependent device mapping */
1697 tmp = pci_find_upstream_pcie_bridge(pdev);
1698 if (!tmp)
1699 return 0;
1700 /* Secondary interface's bus number and devfn 0 */
1701 parent = pdev->bus->self;
1702 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001703 ret = domain_context_mapping_one(domain,
1704 pci_domain_nr(parent->bus),
1705 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001706 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001707 if (ret)
1708 return ret;
1709 parent = parent->bus->self;
1710 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001711 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001712 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001713 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001714 tmp->subordinate->number, 0,
1715 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001716 else /* this is a legacy PCI bridge */
1717 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001718 pci_domain_nr(tmp->bus),
1719 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001720 tmp->devfn,
1721 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722}
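/*
 * Editorial example (hypothetical topology, illustrative only): for a
 * conventional PCI device 05:02.0 sitting behind a PCIe-to-PCI bridge whose
 * secondary bus is 05, the walk above programs a context entry for 05:02.0
 * itself, one for each bridge between the device and the upstream PCIe
 * bridge, and finally one for (bus 05, devfn 0), so that requests carrying
 * the bridge's source-id are translated with the same domain.
 */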
1723
Weidong Han5331fe62008-12-08 23:00:00 +08001724static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725{
1726 int ret;
1727 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001728 struct intel_iommu *iommu;
1729
David Woodhouse276dbf992009-04-04 01:45:37 +01001730 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1731 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001732 if (!iommu)
1733 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001734
David Woodhouse276dbf992009-04-04 01:45:37 +01001735 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001736 if (!ret)
1737 return ret;
1738 /* dependent device mapping */
1739 tmp = pci_find_upstream_pcie_bridge(pdev);
1740 if (!tmp)
1741 return ret;
1742 /* Secondary interface's bus number and devfn 0 */
1743 parent = pdev->bus->self;
1744 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001745 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001746 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001747 if (!ret)
1748 return ret;
1749 parent = parent->bus->self;
1750 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001751 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001752 return device_context_mapped(iommu, tmp->subordinate->number,
1753 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001755 return device_context_mapped(iommu, tmp->bus->number,
1756 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001757}
1758
Fenghua Yuf5329592009-08-04 15:09:37 -07001759/* Returns a number of VTD pages, but aligned to MM page size */
1760static inline unsigned long aligned_nrpages(unsigned long host_addr,
1761 size_t size)
1762{
1763 host_addr &= ~PAGE_MASK;
1764 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1765}
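/*
 * Worked example (editorial, assuming 4KiB pages so PAGE_SHIFT equals
 * VTD_PAGE_SHIFT): host_addr = 0x1234, size = 0x2000 gives an offset of
 * 0x234 within the page, PAGE_ALIGN(0x234 + 0x2000) = 0x3000, and therefore
 * 3 VT-d pages -- one more than size alone would suggest, because the
 * buffer straddles an extra page boundary.
 */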
1766
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001767/* Return largest possible superpage level for a given mapping */
1768static inline int hardware_largepage_caps(struct dmar_domain *domain,
1769 unsigned long iov_pfn,
1770 unsigned long phy_pfn,
1771 unsigned long pages)
1772{
1773 int support, level = 1;
1774 unsigned long pfnmerge;
1775
1776 support = domain->iommu_superpage;
1777
1778 /* To use a large page, the virtual *and* physical addresses
1779 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1780 of them will mean we have to use smaller pages. So just
1781 merge them and check both at once. */
1782 pfnmerge = iov_pfn | phy_pfn;
1783
1784 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1785 pages >>= VTD_STRIDE_SHIFT;
1786 if (!pages)
1787 break;
1788 pfnmerge >>= VTD_STRIDE_SHIFT;
1789 level++;
1790 support--;
1791 }
1792 return level;
1793}
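/*
 * Worked example (editorial, illustrative only): with
 * domain->iommu_superpage == 1 (2MiB pages only), iov_pfn == phy_pfn == 0x200
 * and pages == 512, the merged pfn has its low 9 bits clear and the run
 * covers a full 2MiB stride, so level 2 is returned and the caller installs
 * a single large PTE.  If either pfn had, say, bit 0 set, the loop body
 * would never run and level 1 (plain 4KiB PTEs) would be used instead.
 */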
1794
David Woodhouse9051aa02009-06-29 12:30:54 +01001795static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1796 struct scatterlist *sg, unsigned long phys_pfn,
1797 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001798{
1799 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001800 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001801 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001802 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001803 unsigned int largepage_lvl = 0;
1804 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001805
1806 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1807
1808 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1809 return -EINVAL;
1810
1811 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1812
David Woodhouse9051aa02009-06-29 12:30:54 +01001813 if (sg)
1814 sg_res = 0;
1815 else {
1816 sg_res = nr_pages + 1;
1817 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1818 }
1819
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001820 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001821 uint64_t tmp;
1822
David Woodhousee1605492009-06-29 11:17:38 +01001823 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001824 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001825 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1826 sg->dma_length = sg->length;
1827 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001828 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001829 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001830
David Woodhousee1605492009-06-29 11:17:38 +01001831 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001832 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1833
1834 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001835 if (!pte)
1836 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001837			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001838 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001839 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001840 /* Ensure that old small page tables are removed to make room
1841			   for the superpage, if they exist. */
1842 dma_pte_clear_range(domain, iov_pfn,
1843 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1844 dma_pte_free_pagetable(domain, iov_pfn,
1845 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1846 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001847 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001848 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001849
David Woodhousee1605492009-06-29 11:17:38 +01001850 }
1851		/* We don't need a lock here; nobody else
1852 * touches the iova range
1853 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001854 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001855 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001856 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001857 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1858 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001859 if (dumps) {
1860 dumps--;
1861 debug_dma_dump_mappings(NULL);
1862 }
1863 WARN_ON(1);
1864 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001865
1866 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1867
1868 BUG_ON(nr_pages < lvl_pages);
1869 BUG_ON(sg_res < lvl_pages);
1870
1871 nr_pages -= lvl_pages;
1872 iov_pfn += lvl_pages;
1873 phys_pfn += lvl_pages;
1874 pteval += lvl_pages * VTD_PAGE_SIZE;
1875 sg_res -= lvl_pages;
1876
1877 /* If the next PTE would be the first in a new page, then we
1878 need to flush the cache on the entries we've just written.
1879 And then we'll need to recalculate 'pte', so clear it and
1880 let it get set again in the if (!pte) block above.
1881
1882 If we're done (!nr_pages) we need to flush the cache too.
1883
1884 Also if we've been setting superpages, we may need to
1885 recalculate 'pte' and switch back to smaller pages for the
1886 end of the mapping, if the trailing size is not enough to
1887 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001888 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001889 if (!nr_pages || first_pte_in_page(pte) ||
1890 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001891 domain_flush_cache(domain, first_pte,
1892 (void *)pte - (void *)first_pte);
1893 pte = NULL;
1894 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001895
1896 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001897 sg = sg_next(sg);
1898 }
1899 return 0;
1900}
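/*
 * Editorial walk-through (illustrative only): mapping 4MiB at a 2MiB-aligned
 * iov_pfn/phys_pfn on hardware with 2MiB superpages takes two passes through
 * the loop above, each installing one large PTE (largepage_lvl == 2,
 * lvl_pages == 512), so nr_pages drops 1024 -> 512 -> 0.  A run that is not
 * big enough or aligned well enough for another superpage falls back to
 * largepage_lvl == 1 and plain 4KiB PTEs for the remainder.
 */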
1901
David Woodhouse9051aa02009-06-29 12:30:54 +01001902static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1903 struct scatterlist *sg, unsigned long nr_pages,
1904 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001905{
David Woodhouse9051aa02009-06-29 12:30:54 +01001906 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1907}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001908
David Woodhouse9051aa02009-06-29 12:30:54 +01001909static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1910 unsigned long phys_pfn, unsigned long nr_pages,
1911 int prot)
1912{
1913 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914}
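/*
 * Usage sketch (editorial, not an actual call site in this file):
 * identity-mapping one megabyte starting at physical address 0x200000 into a
 * domain would look roughly like
 *
 *	domain_pfn_mapping(domain, 0x200, 0x200, 256,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is essentially what iommu_domain_identity_map() further down does
 * for RMRR and si_domain ranges, after converting byte addresses to VT-d
 * page frame numbers.
 */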
1915
Weidong Hanc7151a82008-12-08 22:51:37 +08001916static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917{
Weidong Hanc7151a82008-12-08 22:51:37 +08001918 if (!iommu)
1919 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001920
1921 clear_context_table(iommu, bus, devfn);
1922 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001923 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001924 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001925}
1926
David Woodhouse109b9b02012-05-25 17:43:02 +01001927static inline void unlink_domain_info(struct device_domain_info *info)
1928{
1929 assert_spin_locked(&device_domain_lock);
1930 list_del(&info->link);
1931 list_del(&info->global);
1932 if (info->dev)
1933 info->dev->dev.archdata.iommu = NULL;
1934}
1935
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936static void domain_remove_dev_info(struct dmar_domain *domain)
1937{
1938 struct device_domain_info *info;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001939 unsigned long flags, flags2;
Weidong Hanc7151a82008-12-08 22:51:37 +08001940 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001941
1942 spin_lock_irqsave(&device_domain_lock, flags);
1943 while (!list_empty(&domain->devices)) {
1944 info = list_entry(domain->devices.next,
1945 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001946 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947 spin_unlock_irqrestore(&device_domain_lock, flags);
1948
Yu Zhao93a23a72009-05-18 13:51:37 +08001949 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001950 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001951 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001952
Jiang Liu92d03cc2014-02-19 14:07:28 +08001953 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1954 iommu_detach_dependent_devices(iommu, info->dev);
1955			/* clear this iommu from iommu_bmp, update the iommu count
1956 * and capabilities
1957 */
1958 spin_lock_irqsave(&domain->iommu_lock, flags2);
1959 if (test_and_clear_bit(iommu->seq_id,
1960 domain->iommu_bmp)) {
1961 domain->iommu_count--;
1962 domain_update_iommu_cap(domain);
1963 }
1964 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
1965 }
1966
1967 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968 spin_lock_irqsave(&device_domain_lock, flags);
1969 }
1970 spin_unlock_irqrestore(&device_domain_lock, flags);
1971}
1972
1973/*
1974 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001975 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001976 */
Kay, Allen M38717942008-09-09 18:37:29 +03001977static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001978find_domain(struct pci_dev *pdev)
1979{
1980 struct device_domain_info *info;
1981
1982 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001983 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984 if (info)
1985 return info->domain;
1986 return NULL;
1987}
1988
Jiang Liu745f2582014-02-19 14:07:26 +08001989static inline struct dmar_domain *
1990dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
1991{
1992 struct device_domain_info *info;
1993
1994 list_for_each_entry(info, &device_domain_list, global)
1995 if (info->segment == segment && info->bus == bus &&
1996 info->devfn == devfn)
1997 return info->domain;
1998
1999 return NULL;
2000}
2001
2002static int dmar_insert_dev_info(int segment, int bus, int devfn,
2003 struct pci_dev *dev, struct dmar_domain **domp)
2004{
2005 struct dmar_domain *found, *domain = *domp;
2006 struct device_domain_info *info;
2007 unsigned long flags;
2008
2009 info = alloc_devinfo_mem();
2010 if (!info)
2011 return -ENOMEM;
2012
2013 info->segment = segment;
2014 info->bus = bus;
2015 info->devfn = devfn;
2016 info->dev = dev;
2017 info->domain = domain;
2018 if (!dev)
2019 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2020
2021 spin_lock_irqsave(&device_domain_lock, flags);
2022 if (dev)
2023 found = find_domain(dev);
2024 else
2025 found = dmar_search_domain_by_dev_info(segment, bus, devfn);
2026 if (found) {
2027 spin_unlock_irqrestore(&device_domain_lock, flags);
2028 free_devinfo_mem(info);
2029 if (found != domain) {
2030 domain_exit(domain);
2031 *domp = found;
2032 }
2033 } else {
2034 list_add(&info->link, &domain->devices);
2035 list_add(&info->global, &device_domain_list);
2036 if (dev)
2037 dev->dev.archdata.iommu = info;
2038 spin_unlock_irqrestore(&device_domain_lock, flags);
2039 }
2040
2041 return 0;
2042}
2043
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002044/* domain is initialized */
2045static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
2046{
Jiang Liue85bb5d2014-02-19 14:07:27 +08002047 struct dmar_domain *domain, *free = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002048 struct intel_iommu *iommu;
2049 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002050 struct pci_dev *dev_tmp;
2051 unsigned long flags;
2052 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01002053 int segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002054
2055 domain = find_domain(pdev);
2056 if (domain)
2057 return domain;
2058
David Woodhouse276dbf992009-04-04 01:45:37 +01002059 segment = pci_domain_nr(pdev->bus);
2060
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002061 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
2062 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002063 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002064 bus = dev_tmp->subordinate->number;
2065 devfn = 0;
2066 } else {
2067 bus = dev_tmp->bus->number;
2068 devfn = dev_tmp->devfn;
2069 }
2070 spin_lock_irqsave(&device_domain_lock, flags);
Jiang Liu745f2582014-02-19 14:07:26 +08002071 domain = dmar_search_domain_by_dev_info(segment, bus, devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002072 spin_unlock_irqrestore(&device_domain_lock, flags);
2073 /* pcie-pci bridge already has a domain, uses it */
Jiang Liu745f2582014-02-19 14:07:26 +08002074 if (domain)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002075 goto found_domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002076 }
2077
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002078 drhd = dmar_find_matched_drhd_unit(pdev);
2079 if (!drhd) {
2080 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2081 pci_name(pdev));
2082 return NULL;
2083 }
2084 iommu = drhd->iommu;
2085
Jiang Liu745f2582014-02-19 14:07:26 +08002086	/* Allocate and initialize a new domain for the device */
Jiang Liu92d03cc2014-02-19 14:07:28 +08002087 domain = alloc_domain(false);
Jiang Liu745f2582014-02-19 14:07:26 +08002088 if (!domain)
2089 goto error;
2090 if (iommu_attach_domain(domain, iommu)) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002091 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002092 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002093 }
Jiang Liue85bb5d2014-02-19 14:07:27 +08002094 free = domain;
2095 if (domain_init(domain, gaw))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002096 goto error;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097
2098 /* register pcie-to-pci device */
2099 if (dev_tmp) {
Jiang Liue85bb5d2014-02-19 14:07:27 +08002100 if (dmar_insert_dev_info(segment, bus, devfn, NULL, &domain))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002101 goto error;
Jiang Liue85bb5d2014-02-19 14:07:27 +08002102 else
2103 free = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002104 }
2105
2106found_domain:
Jiang Liu745f2582014-02-19 14:07:26 +08002107 if (dmar_insert_dev_info(segment, pdev->bus->number, pdev->devfn,
2108 pdev, &domain) == 0)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002109 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002110error:
Jiang Liue85bb5d2014-02-19 14:07:27 +08002111 if (free)
2112 domain_exit(free);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113 /* recheck it here, maybe others set it */
2114 return find_domain(pdev);
2115}
2116
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002117static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002118#define IDENTMAP_ALL 1
2119#define IDENTMAP_GFX 2
2120#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002121
David Woodhouseb2132032009-06-26 18:50:28 +01002122static int iommu_domain_identity_map(struct dmar_domain *domain,
2123 unsigned long long start,
2124 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002125{
David Woodhousec5395d52009-06-28 16:35:56 +01002126 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2127 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002128
David Woodhousec5395d52009-06-28 16:35:56 +01002129 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2130 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002131 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002132 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002133 }
2134
David Woodhousec5395d52009-06-28 16:35:56 +01002135 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2136 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002137 /*
2138	 * The RMRR range might overlap the physical memory range,
2139	 * so clear it first
2140 */
David Woodhousec5395d52009-06-28 16:35:56 +01002141 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142
David Woodhousec5395d52009-06-28 16:35:56 +01002143 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2144 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002145 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002146}
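/*
 * Editorial example (hypothetical RMRR, illustrative only): for a reserved
 * region 0x7f000000..0x7f0fffff the function above computes
 * first_vpfn = 0x7f000 and last_vpfn = 0x7f0ff, reserves that IOVA range so
 * the allocator never hands it out to a driver, clears any stale PTEs, and
 * then maps the 0x100 pages 1:1 with read/write permission.
 */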
2147
2148static int iommu_prepare_identity_map(struct pci_dev *pdev,
2149 unsigned long long start,
2150 unsigned long long end)
2151{
2152 struct dmar_domain *domain;
2153 int ret;
2154
David Woodhousec7ab48d2009-06-26 19:10:36 +01002155 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002156 if (!domain)
2157 return -ENOMEM;
2158
David Woodhouse19943b02009-08-04 16:19:20 +01002159 /* For _hardware_ passthrough, don't bother. But for software
2160 passthrough, we do it anyway -- it may indicate a memory
2161	   range which is reserved in E820 and therefore didn't get set
2162	   up in si_domain to start with */
2163 if (domain == si_domain && hw_pass_through) {
2164 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2165 pci_name(pdev), start, end);
2166 return 0;
2167 }
2168
2169 printk(KERN_INFO
2170 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2171 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002172
David Woodhouse5595b522009-12-02 09:21:55 +00002173 if (end < start) {
2174 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2175 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2176 dmi_get_system_info(DMI_BIOS_VENDOR),
2177 dmi_get_system_info(DMI_BIOS_VERSION),
2178 dmi_get_system_info(DMI_PRODUCT_VERSION));
2179 ret = -EIO;
2180 goto error;
2181 }
2182
David Woodhouse2ff729f2009-08-26 14:25:41 +01002183 if (end >> agaw_to_width(domain->agaw)) {
2184 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2185 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2186 agaw_to_width(domain->agaw),
2187 dmi_get_system_info(DMI_BIOS_VENDOR),
2188 dmi_get_system_info(DMI_BIOS_VERSION),
2189 dmi_get_system_info(DMI_PRODUCT_VERSION));
2190 ret = -EIO;
2191 goto error;
2192 }
David Woodhouse19943b02009-08-04 16:19:20 +01002193
David Woodhouseb2132032009-06-26 18:50:28 +01002194 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002195 if (ret)
2196 goto error;
2197
2198 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002199 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002200 if (ret)
2201 goto error;
2202
2203 return 0;
2204
2205 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002206 domain_exit(domain);
2207 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002208}
2209
2210static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2211 struct pci_dev *pdev)
2212{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002213 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002214 return 0;
2215 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002216 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002217}
2218
Suresh Siddhad3f13812011-08-23 17:05:25 -07002219#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002220static inline void iommu_prepare_isa(void)
2221{
2222 struct pci_dev *pdev;
2223 int ret;
2224
2225 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2226 if (!pdev)
2227 return;
2228
David Woodhousec7ab48d2009-06-26 19:10:36 +01002229 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002230 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002231
2232 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002233 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2234 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002235
2236}
2237#else
2238static inline void iommu_prepare_isa(void)
2239{
2240 return;
2241}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002242#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002243
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002244static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002245
Matt Kraai071e1372009-08-23 22:30:22 -07002246static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002247{
2248 struct dmar_drhd_unit *drhd;
2249 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002250 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002251
Jiang Liu92d03cc2014-02-19 14:07:28 +08002252 si_domain = alloc_domain(false);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002253 if (!si_domain)
2254 return -EFAULT;
2255
Jiang Liu92d03cc2014-02-19 14:07:28 +08002256 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2257
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002258 for_each_active_iommu(iommu, drhd) {
2259 ret = iommu_attach_domain(si_domain, iommu);
2260 if (ret) {
2261 domain_exit(si_domain);
2262 return -EFAULT;
2263 }
2264 }
2265
2266 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2267 domain_exit(si_domain);
2268 return -EFAULT;
2269 }
2270
Jiang Liu9544c002014-01-06 14:18:13 +08002271 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2272 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002273
David Woodhouse19943b02009-08-04 16:19:20 +01002274 if (hw)
2275 return 0;
2276
David Woodhousec7ab48d2009-06-26 19:10:36 +01002277 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002278 unsigned long start_pfn, end_pfn;
2279 int i;
2280
2281 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2282 ret = iommu_domain_identity_map(si_domain,
2283 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2284 if (ret)
2285 return ret;
2286 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002287 }
2288
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002289 return 0;
2290}
2291
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002292static int identity_mapping(struct pci_dev *pdev)
2293{
2294 struct device_domain_info *info;
2295
2296 if (likely(!iommu_identity_mapping))
2297 return 0;
2298
Mike Traviscb452a42011-05-28 13:15:03 -05002299 info = pdev->dev.archdata.iommu;
2300 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2301 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002302
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002303 return 0;
2304}
2305
2306static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002307 struct pci_dev *pdev,
2308 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002309{
2310 struct device_domain_info *info;
2311 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002312 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002313
2314 info = alloc_devinfo_mem();
2315 if (!info)
2316 return -ENOMEM;
2317
2318 info->segment = pci_domain_nr(pdev->bus);
2319 info->bus = pdev->bus->number;
2320 info->devfn = pdev->devfn;
2321 info->dev = pdev;
2322 info->domain = domain;
2323
2324 spin_lock_irqsave(&device_domain_lock, flags);
2325 list_add(&info->link, &domain->devices);
2326 list_add(&info->global, &device_domain_list);
2327 pdev->dev.archdata.iommu = info;
2328 spin_unlock_irqrestore(&device_domain_lock, flags);
2329
David Woodhousee2ad23d2012-05-25 17:42:54 +01002330 ret = domain_context_mapping(domain, pdev, translation);
2331 if (ret) {
2332 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002333 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002334 spin_unlock_irqrestore(&device_domain_lock, flags);
2335 free_devinfo_mem(info);
2336 return ret;
2337 }
2338
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002339 return 0;
2340}
2341
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002342static bool device_has_rmrr(struct pci_dev *dev)
2343{
2344 struct dmar_rmrr_unit *rmrr;
Jiang Liub683b232014-02-19 14:07:32 +08002345 struct pci_dev *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002346 int i;
2347
Jiang Liu0e242612014-02-19 14:07:34 +08002348 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002349 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002350 /*
2351 * Return TRUE if this RMRR contains the device that
2352 * is passed in.
2353 */
2354 for_each_active_dev_scope(rmrr->devices,
2355 rmrr->devices_cnt, i, tmp)
2356 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002357 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002358 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002359 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002360 }
Jiang Liu0e242612014-02-19 14:07:34 +08002361 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002362 return false;
2363}
2364
David Woodhouse6941af22009-07-04 18:24:27 +01002365static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2366{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002367
2368 /*
2369 * We want to prevent any device associated with an RMRR from
2370 * getting placed into the SI Domain. This is done because
2371 * problems exist when devices are moved in and out of domains
2372 * and their respective RMRR info is lost. We exempt USB devices
2373 * from this process due to their usage of RMRRs that are known
2374 * to not be needed after BIOS hand-off to OS.
2375 */
2376 if (device_has_rmrr(pdev) &&
2377 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2378 return 0;
2379
David Woodhousee0fc7e02009-09-30 09:12:17 -07002380 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2381 return 1;
2382
2383 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2384 return 1;
2385
2386 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2387 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002388
David Woodhouse3dfc8132009-07-04 19:11:08 +01002389 /*
2390 * We want to start off with all devices in the 1:1 domain, and
2391 * take them out later if we find they can't access all of memory.
2392 *
2393 * However, we can't do this for PCI devices behind bridges,
2394 * because all PCI devices behind the same bridge will end up
2395 * with the same source-id on their transactions.
2396 *
2397 * Practically speaking, we can't change things around for these
2398 * devices at run-time, because we can't be sure there'll be no
2399 * DMA transactions in flight for any of their siblings.
2400 *
2401 * So PCI devices (unless they're on the root bus) as well as
2402 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2403 * the 1:1 domain, just in _case_ one of their siblings turns out
2404 * not to be able to map all of memory.
2405 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002406 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002407 if (!pci_is_root_bus(pdev->bus))
2408 return 0;
2409 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2410 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002411 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002412 return 0;
2413
2414 /*
2415 * At boot time, we don't yet know if devices will be 64-bit capable.
2416 * Assume that they will -- if they turn out not to be, then we can
2417 * take them out of the 1:1 domain later.
2418 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002419 if (!startup) {
2420 /*
2421 * If the device's dma_mask is less than the system's memory
2422 * size then this is not a candidate for identity mapping.
2423 */
2424 u64 dma_mask = pdev->dma_mask;
2425
2426 if (pdev->dev.coherent_dma_mask &&
2427 pdev->dev.coherent_dma_mask < dma_mask)
2428 dma_mask = pdev->dev.coherent_dma_mask;
2429
2430 return dma_mask >= dma_get_required_mask(&pdev->dev);
2431 }
David Woodhouse6941af22009-07-04 18:24:27 +01002432
2433 return 1;
2434}
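/*
 * Editorial note (illustrative only): the late dma_mask check above means
 * that, for example, a device limited to 32-bit DMA on a machine with more
 * than 4GiB of RAM reports a dma_mask smaller than dma_get_required_mask()
 * and is therefore kept out of (or later taken out of) the identity domain,
 * falling back to a private DMA-remapping domain that can hand it IOVAs
 * below 4GiB.
 */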
2435
Matt Kraai071e1372009-08-23 22:30:22 -07002436static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002437{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002438 struct pci_dev *pdev = NULL;
2439 int ret;
2440
David Woodhouse19943b02009-08-04 16:19:20 +01002441 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002442 if (ret)
2443 return -EFAULT;
2444
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002445 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002446 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002447 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002448 hw ? CONTEXT_TT_PASS_THROUGH :
2449 CONTEXT_TT_MULTI_LEVEL);
2450 if (ret) {
2451 /* device not associated with an iommu */
2452 if (ret == -ENODEV)
2453 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002454 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002455 }
2456 pr_info("IOMMU: %s identity mapping for device %s\n",
2457 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002458 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002459 }
2460
2461 return 0;
2462}
2463
Joseph Cihulab7792602011-05-03 00:08:37 -07002464static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002465{
2466 struct dmar_drhd_unit *drhd;
2467 struct dmar_rmrr_unit *rmrr;
2468 struct pci_dev *pdev;
2469 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002470 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002471
2472 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002473 * for each drhd
2474 * allocate root
2475 * initialize and program root entry to not present
2476 * endfor
2477 */
2478 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002479 /*
2480		 * No lock needed: this is only incremented in the single-
2481		 * threaded kernel __init code path; all other accesses are
2482		 * read-only
2483 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002484 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2485 g_num_of_iommus++;
2486 continue;
2487 }
2488 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2489 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002490 }
2491
Weidong Hand9630fe2008-12-08 11:06:32 +08002492 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2493 GFP_KERNEL);
2494 if (!g_iommus) {
2495 printk(KERN_ERR "Allocating global iommu array failed\n");
2496 ret = -ENOMEM;
2497 goto error;
2498 }
2499
mark gross80b20dd2008-04-18 13:53:58 -07002500 deferred_flush = kzalloc(g_num_of_iommus *
2501 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2502 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002503 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002504 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002505 }
2506
Jiang Liu7c919772014-01-06 14:18:18 +08002507 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002508 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002509
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002510 ret = iommu_init_domains(iommu);
2511 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002512 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002513
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002514 /*
2515 * TBD:
2516 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002517		 * among all IOMMUs. This needs to be split out later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002518 */
2519 ret = iommu_alloc_root_entry(iommu);
2520 if (ret) {
2521 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002522 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002523 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002524 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002525 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002526 }
2527
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002528 /*
2529	 * Start from a sane iommu hardware state.
2530 */
Jiang Liu7c919772014-01-06 14:18:18 +08002531 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002532 /*
2533 * If the queued invalidation is already initialized by us
2534 * (for example, while enabling interrupt-remapping) then
2535		 * things are already rolling from a sane state.
2536 */
2537 if (iommu->qi)
2538 continue;
2539
2540 /*
2541 * Clear any previous faults.
2542 */
2543 dmar_fault(-1, iommu);
2544 /*
2545 * Disable queued invalidation if supported and already enabled
2546 * before OS handover.
2547 */
2548 dmar_disable_qi(iommu);
2549 }
2550
Jiang Liu7c919772014-01-06 14:18:18 +08002551 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002552 if (dmar_enable_qi(iommu)) {
2553 /*
2554 * Queued Invalidate not enabled, use Register Based
2555 * Invalidate
2556 */
2557 iommu->flush.flush_context = __iommu_flush_context;
2558 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002559 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002560 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002561 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002562 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002563 } else {
2564 iommu->flush.flush_context = qi_flush_context;
2565 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002566 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002567 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002568 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002569 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002570 }
2571 }
2572
David Woodhouse19943b02009-08-04 16:19:20 +01002573 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002574 iommu_identity_mapping |= IDENTMAP_ALL;
2575
Suresh Siddhad3f13812011-08-23 17:05:25 -07002576#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002577 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002578#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002579
2580 check_tylersburg_isoch();
2581
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002582 /*
2583	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002584	 * identity mappings for rmrr, gfx and isa, and possibly fall back to static
2585	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002586 */
David Woodhouse19943b02009-08-04 16:19:20 +01002587 if (iommu_identity_mapping) {
2588 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2589 if (ret) {
2590 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002591 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002592 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002593 }
David Woodhouse19943b02009-08-04 16:19:20 +01002594 /*
2595 * For each rmrr
2596 * for each dev attached to rmrr
2597 * do
2598 * locate drhd for dev, alloc domain for dev
2599 * allocate free domain
2600 * allocate page table entries for rmrr
2601 * if context not allocated for bus
2602 * allocate and init context
2603 * set present in root table for this bus
2604 * init context with domain, translation etc
2605 * endfor
2606 * endfor
2607 */
2608 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2609 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002610		/* some BIOSes list non-existent devices in the DMAR table. */
2611 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2612 i, pdev) {
David Woodhouse19943b02009-08-04 16:19:20 +01002613 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2614 if (ret)
2615 printk(KERN_ERR
2616 "IOMMU: mapping reserved region failed\n");
2617 }
2618 }
2619
2620 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002621
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002622 /*
2623 * for each drhd
2624 * enable fault log
2625 * global invalidate context cache
2626 * global invalidate iotlb
2627 * enable translation
2628 */
Jiang Liu7c919772014-01-06 14:18:18 +08002629 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002630 if (drhd->ignored) {
2631 /*
2632 * we always have to disable PMRs or DMA may fail on
2633 * this device
2634 */
2635 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002636 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002637 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002638 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002639
2640 iommu_flush_write_buffer(iommu);
2641
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002642 ret = dmar_set_interrupt(iommu);
2643 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002644 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002645
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646 iommu_set_root_entry(iommu);
2647
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002648 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002649 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002650
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002651 ret = iommu_enable_translation(iommu);
2652 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002653 goto free_iommu;
David Woodhouseb94996c2009-09-19 15:28:12 -07002654
2655 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656 }
2657
2658 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002659
2660free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002661 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002662 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002663 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002664free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002665 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002666error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002667 return ret;
2668}
2669
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002670/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002671static struct iova *intel_alloc_iova(struct device *dev,
2672 struct dmar_domain *domain,
2673 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002674{
2675 struct pci_dev *pdev = to_pci_dev(dev);
2676 struct iova *iova = NULL;
2677
David Woodhouse875764d2009-06-28 21:20:51 +01002678 /* Restrict dma_mask to the width that the iommu can handle */
2679 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2680
2681 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002682 /*
2683		 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002684		 * DMA_BIT_MASK(32); if that fails, try allocating
Joe Perches36098012007-12-17 11:40:11 -08002685		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002686 */
David Woodhouse875764d2009-06-28 21:20:51 +01002687 iova = alloc_iova(&domain->iovad, nrpages,
2688 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2689 if (iova)
2690 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002691 }
David Woodhouse875764d2009-06-28 21:20:51 +01002692 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2693 if (unlikely(!iova)) {
2694 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2695 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002696 return NULL;
2697 }
2698
2699 return iova;
2700}
2701
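/*
 * Find the dmar_domain for @pdev, allocating one with the default address
 * width if none exists yet, and make sure a multi-level context mapping is
 * installed for the device.  Returns NULL on failure.
 */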
David Woodhouse147202a2009-07-07 19:43:20 +01002702static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002703{
2704 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002705 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002706
2707 domain = get_domain_for_dev(pdev,
2708 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2709 if (!domain) {
2710 printk(KERN_ERR
2711 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002712 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002713 }
2714
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002715 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002716 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002717 ret = domain_context_mapping(domain, pdev,
2718 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002719 if (ret) {
2720 printk(KERN_ERR
2721 "Domain context map for %s failed",
2722 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002723 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002724 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002725 }
2726
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 return domain;
2728}
2729
David Woodhouse147202a2009-07-07 19:43:20 +01002730static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2731{
2732 struct device_domain_info *info;
2733
2734 /* No lock here, assumes no domain exit in normal case */
2735 info = dev->dev.archdata.iommu;
2736 if (likely(info))
2737 return info->domain;
2738
2739 return __get_valid_domain_for_dev(dev);
2740}
2741
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002742static int iommu_dummy(struct pci_dev *pdev)
2743{
2744 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2745}
2746
2747/* Check if the pdev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002748static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002749{
David Woodhouse73676832009-07-04 14:08:36 +01002750 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002751 int found;
2752
Yijing Wangdbad0862013-12-05 19:43:42 +08002753 if (unlikely(!dev_is_pci(dev)))
David Woodhouse73676832009-07-04 14:08:36 +01002754 return 1;
2755
2756 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002757 if (iommu_dummy(pdev))
2758 return 1;
2759
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002760 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002761 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002762
2763 found = identity_mapping(pdev);
2764 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002765 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766 return 1;
2767 else {
2768 /*
2769				 * The device can only address 32 bits of DMA: remove it
2770				 * from si_domain and fall back to non-identity mapping.
2771 */
2772 domain_remove_one_dev_info(si_domain, pdev);
2773 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2774 pci_name(pdev));
2775 return 0;
2776 }
2777 } else {
2778 /*
2779		 * If a 64 bit DMA device was detached from a VM, put the device
2780		 * back into si_domain for identity mapping.
2781 */
David Woodhouse6941af22009-07-04 18:24:27 +01002782 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002783 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002784 ret = domain_add_dev_info(si_domain, pdev,
2785 hw_pass_through ?
2786 CONTEXT_TT_PASS_THROUGH :
2787 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002788 if (!ret) {
2789 printk(KERN_INFO "64bit %s uses identity mapping\n",
2790 pci_name(pdev));
2791 return 1;
2792 }
2793 }
2794 }
2795
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002796 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002797}
2798
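/*
 * Map @paddr/@size for DMA in direction @dir, constrained by @dma_mask.
 * Identity-mapped devices bypass translation and get @paddr back directly.
 * Otherwise an iova is allocated, page-table entries are installed and the
 * IOTLB (or the write buffer, depending on caching mode) is flushed.
 * Returns the bus address to program into the device, or 0 on failure.
 */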
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002799static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2800 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002801{
2802 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002803 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002804 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002805 struct iova *iova;
2806 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002807 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002808 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002809 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002810
2811 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002812
David Woodhouse73676832009-07-04 14:08:36 +01002813 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002814 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002815
2816 domain = get_valid_domain_for_dev(pdev);
2817 if (!domain)
2818 return 0;
2819
Weidong Han8c11e792008-12-08 15:29:22 +08002820 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002821 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002822
Mike Travisc681d0b2011-05-28 13:15:05 -05002823 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002824 if (!iova)
2825 goto error;
2826
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002827 /*
2828	 * Check if DMAR supports zero-length reads on write-only
2829	 * mappings.
2830 */
2831 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002832 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002833 prot |= DMA_PTE_READ;
2834 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2835 prot |= DMA_PTE_WRITE;
2836 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002837	 * paddr to (paddr + size) may cover only part of a page, but we should
2838	 * map the whole page. Note: if two parts of one page are mapped
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002839	 * separately, we might end up with two guest addresses mapping to the
2840	 * same host paddr, but this is not a big problem
2841 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002842 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002843 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002844 if (ret)
2845 goto error;
2846
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002847	/* It's a non-present to present mapping. Only flush if in caching mode. */
2848 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002849 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002850 else
Weidong Han8c11e792008-12-08 15:29:22 +08002851 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002852
David Woodhouse03d6a242009-06-28 15:33:46 +01002853 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2854 start_paddr += paddr & ~PAGE_MASK;
2855 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002856
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002857error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002858 if (iova)
2859 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002860 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002861 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002862 return 0;
2863}
2864
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002865static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2866 unsigned long offset, size_t size,
2867 enum dma_data_direction dir,
2868 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002869{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002870 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2871 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002872}
2873
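/*
 * Drain the per-IOMMU deferred-unmap queues: flush the IOTLB (per-range in
 * caching mode, one global flush plus device-IOTLB flushes otherwise) and
 * free the queued iovas.  Runs with async_umap_flush_lock held.
 */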
mark gross5e0d2a62008-03-04 15:22:08 -08002874static void flush_unmaps(void)
2875{
mark gross80b20dd2008-04-18 13:53:58 -07002876 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002877
mark gross5e0d2a62008-03-04 15:22:08 -08002878 timer_on = 0;
2879
2880 /* just flush them all */
2881 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002882 struct intel_iommu *iommu = g_iommus[i];
2883 if (!iommu)
2884 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002885
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002886 if (!deferred_flush[i].next)
2887 continue;
2888
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002889		/* In caching mode, global flushes make emulation expensive */
2890 if (!cap_caching_mode(iommu->cap))
2891 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002892 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002893 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002894 unsigned long mask;
2895 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002896 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002897
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002898 /* On real hardware multiple invalidations are expensive */
2899 if (cap_caching_mode(iommu->cap))
2900 iommu_flush_iotlb_psi(iommu, domain->id,
2901 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2902 else {
2903 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2904 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2905 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2906 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002907 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002908 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002909 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002910 }
2911
mark gross5e0d2a62008-03-04 15:22:08 -08002912 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002913}
2914
2915static void flush_unmaps_timeout(unsigned long data)
2916{
mark gross80b20dd2008-04-18 13:53:58 -07002917 unsigned long flags;
2918
2919 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002920 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002921 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002922}
2923
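/*
 * Queue an iova for deferred freeing on its IOMMU's deferred_flush list.
 * The list is drained either by the unmap timer (10ms) or immediately once
 * HIGH_WATER_MARK entries have accumulated.
 */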
2924static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2925{
2926 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002927 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002928 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002929
2930 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002931 if (list_size == HIGH_WATER_MARK)
2932 flush_unmaps();
2933
Weidong Han8c11e792008-12-08 15:29:22 +08002934 iommu = domain_get_iommu(dom);
2935 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002936
mark gross80b20dd2008-04-18 13:53:58 -07002937 next = deferred_flush[iommu_id].next;
2938 deferred_flush[iommu_id].domain[next] = dom;
2939 deferred_flush[iommu_id].iova[next] = iova;
2940 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002941
2942 if (!timer_on) {
2943 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2944 timer_on = 1;
2945 }
2946 list_size++;
2947 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2948}
2949
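/*
 * Tear down the mapping at @dev_addr: clear the page-table entries, free the
 * page tables and either flush the IOTLB synchronously (intel_iommu_strict)
 * or defer the iova release via add_unmap().
 */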
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002950static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2951 size_t size, enum dma_data_direction dir,
2952 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002953{
2954 struct pci_dev *pdev = to_pci_dev(dev);
2955 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002956 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002957 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002958 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959
David Woodhouse73676832009-07-04 14:08:36 +01002960 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002961 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002962
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002963 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002964 BUG_ON(!domain);
2965
Weidong Han8c11e792008-12-08 15:29:22 +08002966 iommu = domain_get_iommu(domain);
2967
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002968 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002969 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2970 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002972
David Woodhoused794dc92009-06-28 00:27:49 +01002973 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2974 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002975
David Woodhoused794dc92009-06-28 00:27:49 +01002976 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2977 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002978
2979 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002980 dma_pte_clear_range(domain, start_pfn, last_pfn);
2981
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002982 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002983 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2984
mark gross5e0d2a62008-03-04 15:22:08 -08002985 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002986 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002987 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002988 /* free iova */
2989 __free_iova(&domain->iovad, iova);
2990 } else {
2991 add_unmap(domain, iova);
2992 /*
2993		 * queue up the release of the unmap to save the roughly 1/6th of
2994		 * the cpu time used up by the iotlb flush operation...
2995 */
mark gross5e0d2a62008-03-04 15:22:08 -08002996 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002997}
2998
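/*
 * Allocate zeroed, page-aligned memory and map it DMA_BIDIRECTIONAL for
 * @hwdev.  Translated devices may allocate from any zone; identity-mapped
 * devices fall back to GFP_DMA/GFP_DMA32 according to their coherent mask.
 */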
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002999static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003000 dma_addr_t *dma_handle, gfp_t flags,
3001 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003002{
3003 void *vaddr;
3004 int order;
3005
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003006 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003007 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003008
3009 if (!iommu_no_mapping(hwdev))
3010 flags &= ~(GFP_DMA | GFP_DMA32);
3011 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3012 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3013 flags |= GFP_DMA;
3014 else
3015 flags |= GFP_DMA32;
3016 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003017
3018 vaddr = (void *)__get_free_pages(flags, order);
3019 if (!vaddr)
3020 return NULL;
3021 memset(vaddr, 0, size);
3022
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003023 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3024 DMA_BIDIRECTIONAL,
3025 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003026 if (*dma_handle)
3027 return vaddr;
3028 free_pages((unsigned long)vaddr, order);
3029 return NULL;
3030}
3031
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003032static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003033 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003034{
3035 int order;
3036
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003037 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003038 order = get_order(size);
3039
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003040 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003041 free_pages((unsigned long)vaddr, order);
3042}
3043
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003044static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3045 int nelems, enum dma_data_direction dir,
3046 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003047{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003048 struct pci_dev *pdev = to_pci_dev(hwdev);
3049 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003050 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003052 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003053
David Woodhouse73676832009-07-04 14:08:36 +01003054 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003055 return;
3056
3057 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003058 BUG_ON(!domain);
3059
3060 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003062 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003063 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3064 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003065 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003066
David Woodhoused794dc92009-06-28 00:27:49 +01003067 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3068 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003069
3070 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003071 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003072
David Woodhoused794dc92009-06-28 00:27:49 +01003073 /* free page tables */
3074 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3075
David Woodhouseacea0012009-07-14 01:55:11 +01003076 if (intel_iommu_strict) {
3077 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003078 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003079 /* free iova */
3080 __free_iova(&domain->iovad, iova);
3081 } else {
3082 add_unmap(domain, iova);
3083 /*
3084		 * queue up the release of the unmap to save the roughly 1/6th of
3085		 * the cpu time used up by the iotlb flush operation...
3086 */
3087 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003088}
3089
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003090static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003091 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092{
3093 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003094 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003095
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003096 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003097 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003098 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003099 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003100 }
3101 return nelems;
3102}
3103
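/*
 * Map a scatterlist for @hwdev.  Identity-mapped devices simply get the
 * physical addresses; otherwise one iova range covering all elements is
 * allocated and the pages are mapped contiguously in DMA address space.
 * Returns the number of mapped elements, or 0 on failure.
 */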
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003104static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3105 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003106{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003107 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003108 struct pci_dev *pdev = to_pci_dev(hwdev);
3109 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003110 size_t size = 0;
3111 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003112 struct iova *iova = NULL;
3113 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003114 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003115 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003116 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003117
3118 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003119 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003120 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003121
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003122 domain = get_valid_domain_for_dev(pdev);
3123 if (!domain)
3124 return 0;
3125
Weidong Han8c11e792008-12-08 15:29:22 +08003126 iommu = domain_get_iommu(domain);
3127
David Woodhouseb536d242009-06-28 14:49:31 +01003128 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003129 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003130
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003131 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3132 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003133 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003134 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003135 return 0;
3136 }
3137
3138 /*
3139	 * Check if DMAR supports zero-length reads on write-only
3140	 * mappings.
3141 */
3142 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003143 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003144 prot |= DMA_PTE_READ;
3145 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3146 prot |= DMA_PTE_WRITE;
3147
David Woodhouseb536d242009-06-28 14:49:31 +01003148 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003149
Fenghua Yuf5329592009-08-04 15:09:37 -07003150 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003151 if (unlikely(ret)) {
3152 /* clear the page */
3153 dma_pte_clear_range(domain, start_vpfn,
3154 start_vpfn + size - 1);
3155 /* free page tables */
3156 dma_pte_free_pagetable(domain, start_vpfn,
3157 start_vpfn + size - 1);
3158 /* free iova */
3159 __free_iova(&domain->iovad, iova);
3160 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003161 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003162
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003163	/* It's a non-present to present mapping. Only flush if in caching mode. */
3164 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003165 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003166 else
Weidong Han8c11e792008-12-08 15:29:22 +08003167 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003168
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003169 return nelems;
3170}
3171
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003172static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3173{
3174 return !dma_addr;
3175}
3176
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003177struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003178 .alloc = intel_alloc_coherent,
3179 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003180 .map_sg = intel_map_sg,
3181 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003182 .map_page = intel_map_page,
3183 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003184 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185};
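/*
 * Drivers do not call these ops directly; they use the generic DMA API,
 * which dispatches here once intel_dma_ops is installed as dma_ops.  A
 * typical (illustrative) sequence in a PCI driver looks like:
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */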
3186
3187static inline int iommu_domain_cache_init(void)
3188{
3189 int ret = 0;
3190
3191 iommu_domain_cache = kmem_cache_create("iommu_domain",
3192 sizeof(struct dmar_domain),
3193 0,
3194 SLAB_HWCACHE_ALIGN,
3195
3196 NULL);
3197 if (!iommu_domain_cache) {
3198 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3199 ret = -ENOMEM;
3200 }
3201
3202 return ret;
3203}
3204
3205static inline int iommu_devinfo_cache_init(void)
3206{
3207 int ret = 0;
3208
3209 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3210 sizeof(struct device_domain_info),
3211 0,
3212 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003213 NULL);
3214 if (!iommu_devinfo_cache) {
3215 printk(KERN_ERR "Couldn't create devinfo cache\n");
3216 ret = -ENOMEM;
3217 }
3218
3219 return ret;
3220}
3221
3222static inline int iommu_iova_cache_init(void)
3223{
3224 int ret = 0;
3225
3226 iommu_iova_cache = kmem_cache_create("iommu_iova",
3227 sizeof(struct iova),
3228 0,
3229 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003230 NULL);
3231 if (!iommu_iova_cache) {
3232 printk(KERN_ERR "Couldn't create iova cache\n");
3233 ret = -ENOMEM;
3234 }
3235
3236 return ret;
3237}
3238
3239static int __init iommu_init_mempool(void)
3240{
3241 int ret;
3242 ret = iommu_iova_cache_init();
3243 if (ret)
3244 return ret;
3245
3246 ret = iommu_domain_cache_init();
3247 if (ret)
3248 goto domain_error;
3249
3250 ret = iommu_devinfo_cache_init();
3251 if (!ret)
3252 return ret;
3253
3254 kmem_cache_destroy(iommu_domain_cache);
3255domain_error:
3256 kmem_cache_destroy(iommu_iova_cache);
3257
3258 return -ENOMEM;
3259}
3260
3261static void __init iommu_exit_mempool(void)
3262{
3263 kmem_cache_destroy(iommu_devinfo_cache);
3264 kmem_cache_destroy(iommu_domain_cache);
3265 kmem_cache_destroy(iommu_iova_cache);
3266
3267}
3268
Dan Williams556ab452010-07-23 15:47:56 -07003269static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3270{
3271 struct dmar_drhd_unit *drhd;
3272 u32 vtbar;
3273 int rc;
3274
3275 /* We know that this device on this chipset has its own IOMMU.
3276 * If we find it under a different IOMMU, then the BIOS is lying
3277 * to us. Hope that the IOMMU for this device is actually
3278 * disabled, and it needs no translation...
3279 */
3280 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3281 if (rc) {
3282 /* "can't" happen */
3283 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3284 return;
3285 }
3286 vtbar &= 0xffff0000;
3287
3288	/* we know that this iommu should be at offset 0xa000 from vtbar */
3289 drhd = dmar_find_matched_drhd_unit(pdev);
3290 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3291 TAINT_FIRMWARE_WORKAROUND,
3292 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3293 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3294}
3295DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3296
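/*
 * Mark DRHD units that cover no PCI devices as ignored.  Units that cover
 * only graphics devices are either left active (setting
 * intel_iommu_gfx_mapped) or, when dmar_map_gfx is clear, ignored with their
 * devices switched to the dummy identity domain info.
 */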
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003297static void __init init_no_remapping_devices(void)
3298{
3299 struct dmar_drhd_unit *drhd;
Jiang Liub683b232014-02-19 14:07:32 +08003300 struct pci_dev *dev;
3301 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302
3303 for_each_drhd_unit(drhd) {
3304 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003305 for_each_active_dev_scope(drhd->devices,
3306 drhd->devices_cnt, i, dev)
3307 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308 /* ignore DMAR unit if no pci devices exist */
3309 if (i == drhd->devices_cnt)
3310 drhd->ignored = 1;
3311 }
3312 }
3313
Jiang Liu7c919772014-01-06 14:18:18 +08003314 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003315 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316 continue;
3317
Jiang Liub683b232014-02-19 14:07:32 +08003318 for_each_active_dev_scope(drhd->devices,
3319 drhd->devices_cnt, i, dev)
3320 if (!IS_GFX_DEVICE(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003321 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003322 if (i < drhd->devices_cnt)
3323 continue;
3324
David Woodhousec0771df2011-10-14 20:59:46 +01003325 /* This IOMMU has *only* gfx devices. Either bypass it or
3326 set the gfx_mapped flag, as appropriate */
3327 if (dmar_map_gfx) {
3328 intel_iommu_gfx_mapped = 1;
3329 } else {
3330 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003331 for_each_active_dev_scope(drhd->devices,
3332 drhd->devices_cnt, i, dev)
3333 dev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003334 }
3335 }
3336}
3337
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003338#ifdef CONFIG_SUSPEND
3339static int init_iommu_hw(void)
3340{
3341 struct dmar_drhd_unit *drhd;
3342 struct intel_iommu *iommu = NULL;
3343
3344 for_each_active_iommu(iommu, drhd)
3345 if (iommu->qi)
3346 dmar_reenable_qi(iommu);
3347
Joseph Cihulab7792602011-05-03 00:08:37 -07003348 for_each_iommu(iommu, drhd) {
3349 if (drhd->ignored) {
3350 /*
3351 * we always have to disable PMRs or DMA may fail on
3352 * this device
3353 */
3354 if (force_on)
3355 iommu_disable_protect_mem_regions(iommu);
3356 continue;
3357 }
3358
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003359 iommu_flush_write_buffer(iommu);
3360
3361 iommu_set_root_entry(iommu);
3362
3363 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003364 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003365 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003366 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003367 if (iommu_enable_translation(iommu))
3368 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003369 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003370 }
3371
3372 return 0;
3373}
3374
3375static void iommu_flush_all(void)
3376{
3377 struct dmar_drhd_unit *drhd;
3378 struct intel_iommu *iommu;
3379
3380 for_each_active_iommu(iommu, drhd) {
3381 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003382 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003383 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003384 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003385 }
3386}
3387
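/*
 * Save per-IOMMU fault-event registers and disable translation before the
 * system enters suspend; iommu_resume() restores them on the way back.
 */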
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003388static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003389{
3390 struct dmar_drhd_unit *drhd;
3391 struct intel_iommu *iommu = NULL;
3392 unsigned long flag;
3393
3394 for_each_active_iommu(iommu, drhd) {
3395 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3396 GFP_ATOMIC);
3397 if (!iommu->iommu_state)
3398 goto nomem;
3399 }
3400
3401 iommu_flush_all();
3402
3403 for_each_active_iommu(iommu, drhd) {
3404 iommu_disable_translation(iommu);
3405
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003406 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003407
3408 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3409 readl(iommu->reg + DMAR_FECTL_REG);
3410 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3411 readl(iommu->reg + DMAR_FEDATA_REG);
3412 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3413 readl(iommu->reg + DMAR_FEADDR_REG);
3414 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3415 readl(iommu->reg + DMAR_FEUADDR_REG);
3416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003417 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003418 }
3419 return 0;
3420
3421nomem:
3422 for_each_active_iommu(iommu, drhd)
3423 kfree(iommu->iommu_state);
3424
3425 return -ENOMEM;
3426}
3427
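/*
 * Re-enable the hardware via init_iommu_hw() and restore the fault-event
 * registers saved by iommu_suspend().
 */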
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003428static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003429{
3430 struct dmar_drhd_unit *drhd;
3431 struct intel_iommu *iommu = NULL;
3432 unsigned long flag;
3433
3434 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003435 if (force_on)
3436 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3437 else
3438 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003439 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003440 }
3441
3442 for_each_active_iommu(iommu, drhd) {
3443
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003444 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003445
3446 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3447 iommu->reg + DMAR_FECTL_REG);
3448 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3449 iommu->reg + DMAR_FEDATA_REG);
3450 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3451 iommu->reg + DMAR_FEADDR_REG);
3452 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3453 iommu->reg + DMAR_FEUADDR_REG);
3454
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003455 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003456 }
3457
3458 for_each_active_iommu(iommu, drhd)
3459 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003460}
3461
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003462static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003463 .resume = iommu_resume,
3464 .suspend = iommu_suspend,
3465};
3466
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003467static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003468{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003469 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003470}
3471
3472#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003473static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003474#endif /* CONFIG_PM */
3475
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003476static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3477{
3478 list_add(&rmrr->list, &dmar_rmrr_units);
3479}
3480
3481
3482int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3483{
3484 struct acpi_dmar_reserved_memory *rmrr;
3485 struct dmar_rmrr_unit *rmrru;
3486
3487 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3488 if (!rmrru)
3489 return -ENOMEM;
3490
3491 rmrru->hdr = header;
3492 rmrr = (struct acpi_dmar_reserved_memory *)header;
3493 rmrru->base_address = rmrr->base_address;
3494 rmrru->end_address = rmrr->end_address;
3495
3496 dmar_register_rmrr_unit(rmrru);
3497 return 0;
3498}
3499
3500static int __init
3501rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3502{
3503 struct acpi_dmar_reserved_memory *rmrr;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003504
3505 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
Jiang Liu9bdc5312014-01-06 14:18:27 +08003506 return dmar_parse_dev_scope((void *)(rmrr + 1),
3507 ((void *)rmrr) + rmrr->header.length,
3508 &rmrru->devices_cnt, &rmrru->devices,
3509 rmrr->segment);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003510}
3511
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003512int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3513{
3514 struct acpi_dmar_atsr *atsr;
3515 struct dmar_atsr_unit *atsru;
3516
3517 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3518 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3519 if (!atsru)
3520 return -ENOMEM;
3521
3522 atsru->hdr = hdr;
3523 atsru->include_all = atsr->flags & 0x1;
3524
Jiang Liu0e242612014-02-19 14:07:34 +08003525 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003526
3527 return 0;
3528}
3529
3530static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3531{
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003532 struct acpi_dmar_atsr *atsr;
3533
3534 if (atsru->include_all)
3535 return 0;
3536
3537 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
Jiang Liu9bdc5312014-01-06 14:18:27 +08003538 return dmar_parse_dev_scope((void *)(atsr + 1),
3539 (void *)atsr + atsr->header.length,
3540 &atsru->devices_cnt, &atsru->devices,
3541 atsr->segment);
3542}
3543
3544static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3545{
3546 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3547 kfree(atsru);
3548}
3549
3550static void intel_iommu_free_dmars(void)
3551{
3552 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3553 struct dmar_atsr_unit *atsru, *atsr_n;
3554
3555 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3556 list_del(&rmrru->list);
3557 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3558 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003559 }
3560
Jiang Liu9bdc5312014-01-06 14:18:27 +08003561 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3562 list_del(&atsru->list);
3563 intel_iommu_free_atsr(atsru);
3564 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003565}
3566
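/*
 * Return 1 if @dev sits below a PCIe root port that is listed in an ATSR
 * unit (or if an include_all ATSR exists for its segment), 0 otherwise.
 */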
3567int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3568{
Jiang Liub683b232014-02-19 14:07:32 +08003569 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003570 struct pci_bus *bus;
Jiang Liub683b232014-02-19 14:07:32 +08003571 struct pci_dev *bridge = NULL, *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003572 struct acpi_dmar_atsr *atsr;
3573 struct dmar_atsr_unit *atsru;
3574
3575 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003576 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003577 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003578 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003579 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003580 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003581 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003582 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003583 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003584 if (!bridge)
3585 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003586
Jiang Liu0e242612014-02-19 14:07:34 +08003587 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003588 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3589 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3590 if (atsr->segment != pci_domain_nr(dev->bus))
3591 continue;
3592
Jiang Liub683b232014-02-19 14:07:32 +08003593 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3594 if (tmp == bridge)
3595 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003596
3597 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003598 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003599 }
Jiang Liub683b232014-02-19 14:07:32 +08003600 ret = 0;
3601out:
Jiang Liu0e242612014-02-19 14:07:34 +08003602 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003603
Jiang Liub683b232014-02-19 14:07:32 +08003604 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003605}
3606
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003607int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003608{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003609 struct dmar_rmrr_unit *rmrr;
3610 struct dmar_atsr_unit *atsr;
Jiang Liub683b232014-02-19 14:07:32 +08003611 int ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003612
Jiang Liu9bdc5312014-01-06 14:18:27 +08003613 list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003614 ret = rmrr_parse_dev(rmrr);
3615 if (ret)
3616 return ret;
3617 }
3618
Jiang Liu0e242612014-02-19 14:07:34 +08003619 list_for_each_entry_rcu(atsr, &dmar_atsr_units, list) {
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003620 ret = atsr_parse_dev(atsr);
3621 if (ret)
3622 return ret;
3623 }
3624
Jiang Liub683b232014-02-19 14:07:32 +08003625 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003626}
3627
Jiang Liu59ce0512014-02-19 14:07:35 +08003628int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3629{
3630 int ret = 0;
3631 struct dmar_rmrr_unit *rmrru;
3632 struct dmar_atsr_unit *atsru;
3633 struct acpi_dmar_atsr *atsr;
3634 struct acpi_dmar_reserved_memory *rmrr;
3635
3636 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3637 return 0;
3638
3639 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3640 rmrr = container_of(rmrru->hdr,
3641 struct acpi_dmar_reserved_memory, header);
3642 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3643 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3644 ((void *)rmrr) + rmrr->header.length,
3645 rmrr->segment, rmrru->devices,
3646 rmrru->devices_cnt);
3647 if (ret > 0)
3648 break;
3649 else if(ret < 0)
3650 return ret;
3651 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3652 if (dmar_remove_dev_scope(info, rmrr->segment,
3653 rmrru->devices, rmrru->devices_cnt))
3654 break;
3655 }
3656 }
3657
3658 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3659 if (atsru->include_all)
3660 continue;
3661
3662 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3663 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3664 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3665 (void *)atsr + atsr->header.length,
3666 atsr->segment, atsru->devices,
3667 atsru->devices_cnt);
3668 if (ret > 0)
3669 break;
3670 else if(ret < 0)
3671 return ret;
3672 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3673 if (dmar_remove_dev_scope(info, atsr->segment,
3674 atsru->devices, atsru->devices_cnt))
3675 break;
3676 }
3677 }
3678
3679 return 0;
3680}
3681
Fenghua Yu99dcade2009-11-11 07:23:06 -08003682/*
3683 * Here we only respond to a device being unbound from its driver.
3684 *
3685 * A newly added device is not attached to its DMAR domain here yet. That will
3686 * happen when the device is mapped to an iova.
3687 */
3688static int device_notifier(struct notifier_block *nb,
3689 unsigned long action, void *data)
3690{
3691 struct device *dev = data;
3692 struct pci_dev *pdev = to_pci_dev(dev);
3693 struct dmar_domain *domain;
3694
Jiang Liu816997d2014-02-19 14:07:22 +08003695 if (iommu_dummy(pdev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003696 return 0;
3697
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003698 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3699 action != BUS_NOTIFY_DEL_DEVICE)
3700 return 0;
3701
Fenghua Yu99dcade2009-11-11 07:23:06 -08003702 domain = find_domain(pdev);
3703 if (!domain)
3704 return 0;
3705
Jiang Liu3a5670e2014-02-19 14:07:33 +08003706 down_read(&dmar_global_lock);
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003707 domain_remove_one_dev_info(domain, pdev);
3708 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3709 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3710 list_empty(&domain->devices))
3711 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003712 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003713
Fenghua Yu99dcade2009-11-11 07:23:06 -08003714 return 0;
3715}
3716
3717static struct notifier_block device_nb = {
3718 .notifier_call = device_notifier,
3719};
3720
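/*
 * Main VT-d initialization entry point: parse the DMAR tables, reserve the
 * special iova ranges, initialize every DMAR unit via init_dmars(), install
 * intel_dma_ops and register the IOMMU ops, PM hooks and bus notifier.
 */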
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003721int __init intel_iommu_init(void)
3722{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003723 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003724 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003725 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003726
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003727 /* VT-d is required for a TXT/tboot launch, so enforce that */
3728 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003729
Jiang Liu3a5670e2014-02-19 14:07:33 +08003730 if (iommu_init_mempool()) {
3731 if (force_on)
3732 panic("tboot: Failed to initialize iommu memory\n");
3733 return -ENOMEM;
3734 }
3735
3736 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003737 if (dmar_table_init()) {
3738 if (force_on)
3739 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003740 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003741 }
3742
Takao Indoh3a93c842013-04-23 17:35:03 +09003743 /*
3744 * Disable translation if already enabled prior to OS handover.
3745 */
Jiang Liu7c919772014-01-06 14:18:18 +08003746 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09003747 if (iommu->gcmd & DMA_GCMD_TE)
3748 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09003749
Suresh Siddhac2c72862011-08-23 17:05:19 -07003750 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003751 if (force_on)
3752 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003753 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003754 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003755
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003756 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08003757 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07003758
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003759 if (list_empty(&dmar_rmrr_units))
3760 printk(KERN_INFO "DMAR: No RMRR found\n");
3761
3762 if (list_empty(&dmar_atsr_units))
3763 printk(KERN_INFO "DMAR: No ATSR found\n");
3764
Joseph Cihula51a63e62011-03-21 11:04:24 -07003765 if (dmar_init_reserved_ranges()) {
3766 if (force_on)
3767 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08003768 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003769 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003770
3771 init_no_remapping_devices();
3772
Joseph Cihulab7792602011-05-03 00:08:37 -07003773 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003774 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003775 if (force_on)
3776 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003777 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003778 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003779 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08003780 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003781 printk(KERN_INFO
3782 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3783
mark gross5e0d2a62008-03-04 15:22:08 -08003784 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003785#ifdef CONFIG_SWIOTLB
3786 swiotlb = 0;
3787#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003788 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003789
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003790 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003791
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003792 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003793
Fenghua Yu99dcade2009-11-11 07:23:06 -08003794 bus_register_notifier(&pci_bus_type, &device_nb);
3795
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003796 intel_iommu_enabled = 1;
3797
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003798 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08003799
3800out_free_reserved_range:
3801 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08003802out_free_dmar:
3803 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08003804 up_write(&dmar_global_lock);
3805 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08003806 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003807}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003808
Han, Weidong3199aa62009-02-26 17:31:12 +08003809static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3810 struct pci_dev *pdev)
3811{
3812 struct pci_dev *tmp, *parent;
3813
3814 if (!iommu || !pdev)
3815 return;
3816
3817 /* dependent device detach */
3818 tmp = pci_find_upstream_pcie_bridge(pdev);
3819 /* Secondary interface's bus number and devfn 0 */
3820 if (tmp) {
3821 parent = pdev->bus->self;
3822 while (parent != tmp) {
3823 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003824 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003825 parent = parent->bus->self;
3826 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003827 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003828 iommu_detach_dev(iommu,
3829 tmp->subordinate->number, 0);
3830 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003831 iommu_detach_dev(iommu, tmp->bus->number,
3832 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003833 }
3834}
3835
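/*
 * Detach @pdev from @domain: unlink its device_domain_info, disable its
 * device IOTLB and clear its context entries (including any bridges it sits
 * behind).  If no other device on the same IOMMU remains in the domain, the
 * IOMMU is dropped from the domain and, for non-VM/non-SI domains, the
 * domain id on that IOMMU is released.
 */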
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003836static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003837 struct pci_dev *pdev)
3838{
Yijing Wangbca2b912013-10-31 17:26:04 +08003839 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08003840 struct intel_iommu *iommu;
3841 unsigned long flags;
3842 int found = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +08003843
David Woodhouse276dbf992009-04-04 01:45:37 +01003844 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3845 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003846 if (!iommu)
3847 return;
3848
3849 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08003850 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
Mike Habeck8519dc42011-05-28 13:15:07 -05003851 if (info->segment == pci_domain_nr(pdev->bus) &&
3852 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003853 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003854 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003855 spin_unlock_irqrestore(&device_domain_lock, flags);
3856
Yu Zhao93a23a72009-05-18 13:51:37 +08003857 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003858 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003859 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003860 free_devinfo_mem(info);
3861
3862 spin_lock_irqsave(&device_domain_lock, flags);
3863
3864 if (found)
3865 break;
3866 else
3867 continue;
3868 }
3869
 3870		/* if there are no other devices under the same iommu
 3871		 * owned by this domain, clear this iommu from iommu_bmp and
 3872		 * update the iommu count and coherency
 3873		 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003874 if (iommu == device_to_iommu(info->segment, info->bus,
3875 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003876 found = 1;
3877 }
3878
Roland Dreier3e7abe22011-07-20 06:22:21 -07003879 spin_unlock_irqrestore(&device_domain_lock, flags);
3880
Weidong Hanc7151a82008-12-08 22:51:37 +08003881 if (found == 0) {
3882 unsigned long tmp_flags;
3883 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003884 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003885 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003886 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003887 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003888
Alex Williamson9b4554b2011-05-24 12:19:04 -04003889 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3890 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3891 spin_lock_irqsave(&iommu->lock, tmp_flags);
3892 clear_bit(domain->id, iommu->domain_ids);
3893 iommu->domains[domain->id] = NULL;
3894 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3895 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003896 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003897}
3898
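/*
 * Minimal domain set-up for domains created through the IOMMU API:
 * initialise the IOVA allocator and reserved ranges, derive agaw from
 * @guest_width, reset the per-domain capability flags and allocate the
 * top-level page directory.
 */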
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003899static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003900{
3901 int adjust_width;
3902
3903 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003904 domain_reserve_special_ranges(domain);
3905
3906 /* calculate AGAW */
3907 domain->gaw = guest_width;
3908 adjust_width = guestwidth_to_adjustwidth(guest_width);
3909 domain->agaw = width_to_agaw(adjust_width);
3910
Weidong Han5e98c4b2008-12-08 23:03:27 +08003911 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003912 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003913 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003914 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003915 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003916
3917 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003918 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003919 if (!domain->pgd)
3920 return -ENOMEM;
3921 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3922 return 0;
3923}
3924
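/*
 * iommu_ops callback: allocate and initialise a dmar_domain for
 * @domain with the default address width, then expose the resulting
 * aperture geometry to the IOMMU core.
 */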
Joerg Roedel5d450802008-12-03 14:52:32 +01003925static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003926{
Joerg Roedel5d450802008-12-03 14:52:32 +01003927 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003928
Jiang Liu92d03cc2014-02-19 14:07:28 +08003929 dmar_domain = alloc_domain(true);
Joerg Roedel5d450802008-12-03 14:52:32 +01003930 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003931 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003932 "intel_iommu_domain_init: dmar_domain == NULL\n");
3933 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003934 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003935 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003936 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003937 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08003938 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003939 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003940 }
Allen Kay8140a952011-10-14 12:32:17 -07003941 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003942 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003943
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003944 domain->geometry.aperture_start = 0;
3945 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3946 domain->geometry.force_aperture = true;
3947
Joerg Roedel5d450802008-12-03 14:52:32 +01003948 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003949}
Kay, Allen M38717942008-09-09 18:37:29 +03003950
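/* iommu_ops callback: undo intel_iommu_domain_init() and free the dmar_domain. */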
Joerg Roedel5d450802008-12-03 14:52:32 +01003951static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003952{
Joerg Roedel5d450802008-12-03 14:52:32 +01003953 struct dmar_domain *dmar_domain = domain->priv;
3954
3955 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08003956 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003957}
Kay, Allen M38717942008-09-09 18:37:29 +03003958
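/*
 * iommu_ops callback: attach @dev to @domain.  Any domain the device is
 * already mapped into is torn down first, the domain's address width is
 * checked against and reduced to what the target IOMMU supports (failing
 * if already-mapped addresses would no longer fit), surplus page-table
 * levels are stripped, and the device is then added with a multi-level
 * context entry.
 */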
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003959static int intel_iommu_attach_device(struct iommu_domain *domain,
3960 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003961{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003962 struct dmar_domain *dmar_domain = domain->priv;
3963 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003964 struct intel_iommu *iommu;
3965 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003966
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003967 /* normally pdev is not mapped */
3968 if (unlikely(domain_context_mapped(pdev))) {
3969 struct dmar_domain *old_domain;
3970
3971 old_domain = find_domain(pdev);
3972 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003973 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3974 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3975 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003976 else
3977 domain_remove_dev_info(old_domain);
3978 }
3979 }
3980
David Woodhouse276dbf992009-04-04 01:45:37 +01003981 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3982 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003983 if (!iommu)
3984 return -ENODEV;
3985
3986 /* check if this iommu agaw is sufficient for max mapped address */
3987 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003988 if (addr_width > cap_mgaw(iommu->cap))
3989 addr_width = cap_mgaw(iommu->cap);
3990
3991 if (dmar_domain->max_addr > (1LL << addr_width)) {
3992 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003993 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003994 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003995 return -EFAULT;
3996 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003997 dmar_domain->gaw = addr_width;
3998
3999 /*
4000 * Knock out extra levels of page tables if necessary
4001 */
4002 while (iommu->agaw < dmar_domain->agaw) {
4003 struct dma_pte *pte;
4004
4005 pte = dmar_domain->pgd;
4006 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004007 dmar_domain->pgd = (struct dma_pte *)
4008 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004009 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004010 }
4011 dmar_domain->agaw--;
4012 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004013
David Woodhouse5fe60f42009-08-09 10:53:41 +01004014 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004015}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004016
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004017static void intel_iommu_detach_device(struct iommu_domain *domain,
4018 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004019{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004020 struct dmar_domain *dmar_domain = domain->priv;
4021 struct pci_dev *pdev = to_pci_dev(dev);
4022
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004023 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03004024}
Kay, Allen M38717942008-09-09 18:37:29 +03004025
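/*
 * iommu_ops callback: map @size bytes at @iova to physical address @hpa.
 * IOMMU_READ/WRITE/CACHE are translated into DMA PTE bits, the domain's
 * max_addr is grown (as long as it still fits the guest address width),
 * and the page-aligned range is handed to domain_pfn_mapping().
 */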
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004026static int intel_iommu_map(struct iommu_domain *domain,
4027 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004028 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004029{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004030 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004031 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004032 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004033 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004034
Joerg Roedeldde57a22008-12-03 15:04:09 +01004035 if (iommu_prot & IOMMU_READ)
4036 prot |= DMA_PTE_READ;
4037 if (iommu_prot & IOMMU_WRITE)
4038 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08004039 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4040 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004041
David Woodhouse163cc522009-06-28 00:51:17 +01004042 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004043 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004044 u64 end;
4045
4046 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004047 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004048 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004049 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004050 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004051 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004052 return -EFAULT;
4053 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004054 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004055 }
David Woodhousead051222009-06-28 14:22:28 +01004056	/* Round size up to the next multiple of PAGE_SIZE if it, together
 4057	   with the low bits of hpa, would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004058 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004059 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4060 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004061 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004062}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004063
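/*
 * iommu_ops callback: clear the PTEs covering @iova..@iova+@size-1,
 * shrink max_addr if the tail of the domain was unmapped, and report
 * back how many bytes were actually cleared (PAGE_SIZE << order).
 */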
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004064static size_t intel_iommu_unmap(struct iommu_domain *domain,
4065 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004066{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004067 struct dmar_domain *dmar_domain = domain->priv;
Allen Kay292827c2011-10-14 12:31:54 -07004068 int order;
Sheng Yang4b99d352009-07-08 11:52:52 +01004069
Allen Kay292827c2011-10-14 12:31:54 -07004070 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
David Woodhouse163cc522009-06-28 00:51:17 +01004071 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004072
David Woodhouse163cc522009-06-28 00:51:17 +01004073 if (dmar_domain->max_addr == iova + size)
4074 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004075
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004076 return PAGE_SIZE << order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004077}
Kay, Allen M38717942008-09-09 18:37:29 +03004078
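/*
 * iommu_ops callback: walk the domain's page table and return the
 * physical address backing @iova, or 0 if nothing is mapped there.
 */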
Joerg Roedeld14d6572008-12-03 15:06:57 +01004079static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05304080 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004081{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004082 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004083 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004084 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004085
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004086 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
Kay, Allen M38717942008-09-09 18:37:29 +03004087 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004088 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004089
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004090 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004091}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004092
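/*
 * iommu_ops callback: report whether the domain can enforce cache
 * coherency (snooping) and whether interrupt remapping is enabled.
 */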
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004093static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4094 unsigned long cap)
4095{
4096 struct dmar_domain *dmar_domain = domain->priv;
4097
4098 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4099 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004100 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004101 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004102
4103 return 0;
4104}
4105
Alex Williamson783f1572012-05-30 14:19:43 -06004106#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4107
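/*
 * iommu_ops callback: place @dev in an IOMMU group.  The grouping
 * follows the device that actually issues DMA on its behalf: an
 * upstream PCIe-to-PCI bridge, a quirked DMA source, the lowest
 * numbered function lacking ACS on a multifunction device, or the
 * nearest upstream device below which ACS isolation is not guaranteed
 * all the way to the root bus.
 */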
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004108static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004109{
4110 struct pci_dev *pdev = to_pci_dev(dev);
Alex Williamson3da4af02012-11-13 10:22:03 -07004111 struct pci_dev *bridge, *dma_pdev = NULL;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004112 struct iommu_group *group;
4113 int ret;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004114
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004115 if (!device_to_iommu(pci_domain_nr(pdev->bus),
4116 pdev->bus->number, pdev->devfn))
Alex Williamson70ae6f02011-10-21 15:56:11 -04004117 return -ENODEV;
4118
4119 bridge = pci_find_upstream_pcie_bridge(pdev);
4120 if (bridge) {
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004121 if (pci_is_pcie(bridge))
4122 dma_pdev = pci_get_domain_bus_and_slot(
4123 pci_domain_nr(pdev->bus),
4124 bridge->subordinate->number, 0);
Alex Williamson3da4af02012-11-13 10:22:03 -07004125 if (!dma_pdev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004126 dma_pdev = pci_dev_get(bridge);
4127 } else
4128 dma_pdev = pci_dev_get(pdev);
4129
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004130 /* Account for quirked devices */
Alex Williamson783f1572012-05-30 14:19:43 -06004131 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4132
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004133 /*
4134 * If it's a multifunction device that does not support our
Alex Williamsonc14d2692013-05-30 12:39:18 -06004135	 * required ACS flags, add it to the same group as the lowest numbered
 4136	 * function that also does not support the required ACS flags.
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004137 */
Alex Williamson783f1572012-05-30 14:19:43 -06004138 if (dma_pdev->multifunction &&
Alex Williamsonc14d2692013-05-30 12:39:18 -06004139 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4140 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4141
4142 for (i = 0; i < 8; i++) {
4143 struct pci_dev *tmp;
4144
4145 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4146 if (!tmp)
4147 continue;
4148
4149 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4150 swap_pci_ref(&dma_pdev, tmp);
4151 break;
4152 }
4153 pci_dev_put(tmp);
4154 }
4155 }
Alex Williamson783f1572012-05-30 14:19:43 -06004156
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004157 /*
4158 * Devices on the root bus go through the iommu. If that's not us,
4159 * find the next upstream device and test ACS up to the root bus.
4160 * Finding the next device may require skipping virtual buses.
4161 */
Alex Williamson783f1572012-05-30 14:19:43 -06004162 while (!pci_is_root_bus(dma_pdev->bus)) {
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004163 struct pci_bus *bus = dma_pdev->bus;
4164
4165 while (!bus->self) {
4166 if (!pci_is_root_bus(bus))
4167 bus = bus->parent;
4168 else
4169 goto root_bus;
4170 }
4171
4172 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
Alex Williamson783f1572012-05-30 14:19:43 -06004173 break;
4174
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004175 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
Alex Williamson70ae6f02011-10-21 15:56:11 -04004176 }
4177
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004178root_bus:
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004179 group = iommu_group_get(&dma_pdev->dev);
4180 pci_dev_put(dma_pdev);
4181 if (!group) {
4182 group = iommu_group_alloc();
4183 if (IS_ERR(group))
4184 return PTR_ERR(group);
4185 }
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004186
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004187 ret = iommu_group_add_device(group, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004188
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004189 iommu_group_put(group);
4190 return ret;
4191}
4192
4193static void intel_iommu_remove_device(struct device *dev)
4194{
4195 iommu_group_remove_device(dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004196}
4197
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004198static struct iommu_ops intel_iommu_ops = {
4199 .domain_init = intel_iommu_domain_init,
4200 .domain_destroy = intel_iommu_domain_destroy,
4201 .attach_dev = intel_iommu_attach_device,
4202 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004203 .map = intel_iommu_map,
4204 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004205 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004206 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004207 .add_device = intel_iommu_add_device,
4208 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004209 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004210};
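
/*
 * The callbacks above are reached through the generic IOMMU API rather
 * than called directly.  As a rough, minimal sketch (error handling and
 * cleanup omitted; "pdev", "iova" and "phys" are placeholder values, not
 * part of this driver), a caller would do something like:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 *
 * which ends up in intel_iommu_domain_init(), intel_iommu_attach_device(),
 * intel_iommu_map() and friends via the ops table registered with
 * bus_set_iommu() in intel_iommu_init().
 */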
David Woodhouse9af88142009-02-13 23:18:03 +00004211
Daniel Vetter94526182013-01-20 23:50:13 +01004212static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4213{
4214 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4215 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4216 dmar_map_gfx = 0;
4217}
4218
4219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4220DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4224DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4225DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4226
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004227static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004228{
4229 /*
4230 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004231 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004232 */
4233 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4234 rwbf_quirk = 1;
4235}
4236
4237DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004238DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4239DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4240DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4241DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4242DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4243DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004244
Adam Jacksoneecfd572010-08-25 21:17:34 +01004245#define GGC 0x52
4246#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4247#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4248#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4249#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4250#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4251#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4252#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4253#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4254
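/*
 * Calpella/Ironlake graphics: read the GGC register and, if the BIOS
 * allocated no VT-d (shadow GTT) space, disable the IOMMU for graphics;
 * otherwise fall back to strict (unbatched) IOTLB flushing so the GPU
 * is idle before translations are torn down.
 */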
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004255static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004256{
4257 unsigned short ggc;
4258
Adam Jacksoneecfd572010-08-25 21:17:34 +01004259 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004260 return;
4261
Adam Jacksoneecfd572010-08-25 21:17:34 +01004262 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004263 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4264 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004265 } else if (dmar_map_gfx) {
4266 /* we have to ensure the gfx device is idle before we flush */
4267 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4268 intel_iommu_strict = 1;
4269 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004270}
4271DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4272DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4273DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4274DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4275
David Woodhousee0fc7e02009-09-30 09:12:17 -07004276/* On Tylersburg chipsets, some BIOSes have been known to enable the
4277 ISOCH DMAR unit for the Azalia sound device, but not give it any
4278 TLB entries, which causes it to deadlock. Check for that. We do
4279 this in a function called from init_dmars(), instead of in a PCI
4280 quirk, because we don't want to print the obnoxious "BIOS broken"
4281 message if VT-d is actually disabled.
4282*/
4283static void __init check_tylersburg_isoch(void)
4284{
4285 struct pci_dev *pdev;
4286 uint32_t vtisochctrl;
4287
4288 /* If there's no Azalia in the system anyway, forget it. */
4289 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4290 if (!pdev)
4291 return;
4292 pci_dev_put(pdev);
4293
4294 /* System Management Registers. Might be hidden, in which case
4295 we can't do the sanity check. But that's OK, because the
4296 known-broken BIOSes _don't_ actually hide it, so far. */
4297 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4298 if (!pdev)
4299 return;
4300
4301 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4302 pci_dev_put(pdev);
4303 return;
4304 }
4305
4306 pci_dev_put(pdev);
4307
4308 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4309 if (vtisochctrl & 1)
4310 return;
4311
4312 /* Drop all bits other than the number of TLB entries */
4313 vtisochctrl &= 0x1c;
4314
4315 /* If we have the recommended number of TLB entries (16), fine. */
4316 if (vtisochctrl == 0x10)
4317 return;
4318
4319 /* Zero TLB entries? You get to ride the short bus to school. */
4320 if (!vtisochctrl) {
4321 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4322 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4323 dmi_get_system_info(DMI_BIOS_VENDOR),
4324 dmi_get_system_info(DMI_BIOS_VERSION),
4325 dmi_get_system_info(DMI_PRODUCT_VERSION));
4326 iommu_identity_mapping |= IDENTMAP_AZALIA;
4327 return;
4328 }
4329
 4330	printk(KERN_WARNING "DMAR: Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4331 vtisochctrl);
4332}