/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
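
/*
 * Illustrative note (not from the original source): on 64-bit builds the
 * min_t() above is a no-op, since __DOMAIN_MAX_PFN(48) == 2^36 - 1 already
 * fits in an unsigned long.  On 32-bit builds DOMAIN_MAX_PFN(48) clamps to
 * ULONG_MAX, so PFN arithmetic stays within the native word size.
 */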

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was an order of a 4KiB page and that the
 * mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
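
/*
 * Illustrative note (not from the original source): ~0xFFFUL clears bits
 * 0-11 and sets every bit from 12 upward, i.e. it advertises 4KiB (bit 12),
 * 8KiB (bit 13), 16KiB, and so on -- every power-of-two multiple of the
 * 4KiB base page -- to the IOMMU core.
 */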

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
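
/*
 * Worked example (illustrative, not from the original source): with
 * LEVEL_STRIDE == 9, a 48-bit address width gives agaw 2 and
 * agaw_to_level() == 4 page-table levels.  For a DMA pfn, level 1 indexes
 * bits 0-8, level 2 bits 9-17, level 3 bits 18-26 and level 4 bits 27-35,
 * so e.g.:
 *
 *	pfn_level_offset(0x12345678, 2) == (0x12345678 >> 9) & 0x1ff
 */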

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
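
/*
 * Illustrative note (not from the original source): on x86, PAGE_SHIFT and
 * VTD_PAGE_SHIFT are both 12, so these conversions are no-ops.  On an
 * architecture with 64KiB MM pages (PAGE_SHIFT == 16) one MM page would
 * span 16 VT-d pages, and mm_to_dma_pfn() would shift left by 4.
 */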

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
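
/*
 * Illustrative note (not from the original source): translation setup is a
 * two-stage table lookup.  The PCI bus number indexes the 256-entry root
 * table to find a context table, and the devfn indexes that context table
 * to find the per-device entry, roughly:
 *
 *	root = &iommu->root_entry[bus];
 *	context = &get_context_addr_from_root(root)[devfn];
 */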

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
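
/*
 * Illustrative sketch (not from the original source) of how these setters
 * combine when a context entry is programmed for a domain:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */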

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
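
/*
 * Illustrative note (not from the original source): each dma_pte is 8
 * bytes, so a 4KiB page-table page holds 512 of them.  first_pte_in_page()
 * tests the low 12 bits of the PTE's own address; zero means the pointer
 * sits at the start of a table page, which the walkers below use to detect
 * page boundaries.
 */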

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
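
/*
 * Illustrative usage sketch (not from the original source): the macro
 * walks every RMRR (Reserved Memory Region Reporting) structure parsed
 * from the DMAR table, e.g.
 *
 *	struct dmar_rmrr_unit *rmrr;
 *
 *	for_each_rmrr_units(rmrr)
 *		pr_info("RMRR: %llx-%llx\n",
 *			rmrr->base_address, rmrr->end_address);
 */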

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
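
/*
 * Illustrative usage (not from the original source): the options above are
 * parsed from the kernel command line and may be comma-separated, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 */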

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
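
/*
 * Illustrative note (not from the original source): SAGAW is a capability
 * bitmap in which bit N means "(N+2)-level page tables supported", matching
 * agaw_to_level() above.  If cap_sagaw() returned 0x4 (bit 2 set, i.e.
 * 4-level tables only), __iommu_calculate_agaw(iommu, 48) would start at
 * agaw 2 and return 2.
 */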

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	rcu_read_unlock();

	domain->iommu_superpage = fls(mask);
}
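
/*
 * Illustrative note (not from the original source): cap_super_page_val()
 * is a bitmask where bit 0 indicates 2MiB and bit 1 indicates 1GiB
 * superpage support.  If every active IOMMU reports 0x3, fls(0x3) == 2 and
 * the domain may use up to 1GiB superpages; if any IOMMU reports 0, the
 * AND zeroes the mask and superpages are disabled for the domain.
 */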

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
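
/*
 * Illustrative note (not from the original source): callers pass the level
 * they want by reference.  *target_level == 1 walks down to the 4KiB leaf
 * PTE, allocating intermediate table pages as needed; *target_level == 0
 * means "stop wherever the walk naturally ends" (a superpage or a
 * non-present entry) and the level reached is reported back through the
 * pointer.
 */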

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct page *freelist = NULL;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
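
/*
 * Illustrative usage sketch (not from the original source) of the
 * unmap/flush/free sequence these helpers are designed for:
 *
 *	struct page *freelist;
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
 *			      last_pfn - start_pfn + 1, 0, 0);
 *	dma_free_pagelist(freelist);
 *
 * The page-table pages are only returned to the allocator once the IOTLB
 * flush guarantees the hardware can no longer walk them.
 */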

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;

		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
1338
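/*
 * Page-selective invalidation flushes a naturally aligned power-of-two
 * region; the mask is the log2 of the rounded-up page count. E.g. a
 * 9-page request rounds to 16 pages, mask 4, so hardware invalidates
 * the aligned 16-page window covering it. The ih flag becomes bit 6 of
 * the address, hinting that non-leaf paging-structure caches need not
 * be invalidated.
 */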
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fall back to a domain-selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to
	 * be naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

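/*
 * Size the per-IOMMU domain bookkeeping from the capability register:
 * a bitmap of usable domain IDs plus an array mapping each ID to its
 * dmar_domain. With caching mode, ID 0 tags invalid translations and
 * is reserved up front.
 */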
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider another allocation for future chips
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

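/*
 * Tear down everything this IOMMU contributed: drop its reference on
 * each attached domain (destroying a domain when this was its last
 * IOMMU), disable DMA translation, and release the domain bookkeeping
 * and the root/context tables.
 */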
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0)
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(bool vm)
{
	/* Domain ids for virtual machines are never set in context entries */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	domain->iommu_count = 0;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (vm) {
		domain->id = atomic_inc_return(&vm_domid);
		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
	}

	return domain;
}

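/*
 * Bind a domain to an IOMMU: allocate the first free hardware domain
 * ID from the bitmap, record the domain in the per-IOMMU table, and
 * mark this IOMMU in the domain's iommu_bmp.
 */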
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	domain->iommu_count++;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			clear_bit(num, iommu->domain_ids);
			iommu->domains[num] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

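/*
 * Worked example for guestwidth_to_adjustwidth() above, which
 * domain_init() uses to pick a supported AGAW: page tables cover 12
 * offset bits plus 9 bits per level, so a usable width satisfies
 * agaw = 12 + 9 * n. A guest width of 48 maps to itself (n = 4), while
 * gaw = 40 gives r = 1 and rounds up to agaw = 48.
 */
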
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
		    test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

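/*
 * Program one context entry so that requests from (bus, devfn) on this
 * IOMMU are translated through the domain: pick (or reuse) a hardware
 * domain ID, point the entry at the page-table root unless the device
 * runs in pass-through mode, and flush the context and IOTLB caches so
 * hardware observes the new entry.
 */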
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in the iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip the top levels of the page tables for an
		 * iommu which has a smaller agaw than the default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass-through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware, and ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	int ret;
	struct pci_dev *pdev, *tmp, *parent;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ret = domain_context_mapping_one(domain, iommu, bus, devfn,
					 translation);
	if (ret || !dev_is_pci(dev))
		return ret;

	/* dependent device mapping */
	pdev = to_pci_dev(dev);
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, iommu,
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain, iommu,
						  tmp->subordinate->number, 0,
						  translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain, iommu,
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct device *dev)
{
	int ret;
	struct pci_dev *pdev, *tmp, *parent;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, bus, devfn);
	if (!ret || !dev_is_pci(dev))
		return ret;

	/* dependent device mapping */
	pdev = to_pci_dev(dev);
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

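/*
 * Illustrative example for aligned_nrpages() below: with 4KiB pages, a
 * buffer starting at page offset 0x234 with size 0x1000 spans two
 * pages, and indeed PAGE_ALIGN(0x234 + 0x1000) >> VTD_PAGE_SHIFT == 2.
 */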
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return the largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

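/*
 * Core mapping loop: populate PTEs for nr_pages starting at iov_pfn,
 * either from a scatterlist (sg != NULL) or from the contiguous range
 * beginning at phys_pfn. When alignment and length allow, a single
 * superpage PTE stands in for 512 (or 512^2, ...) small pages.
 */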
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* This is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed
				   to make room for the superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags, flags2;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			/* clear this iommu in iommu_bmp, update iommu count
			 * and capabilities
			 */
			spin_lock_irqsave(&domain->iommu_lock, flags2);
			if (test_and_clear_bit(info->iommu->seq_id,
					       domain->iommu_bmp)) {
				domain->iommu_count--;
				domain_update_iommu_cap(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct device->archdata.iommu stores the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here; assumes no domain exit in the normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

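/*
 * Record the (bus, devfn) -> domain binding. If another thread raced us
 * and already installed a binding, the existing domain is returned and
 * the caller must free the one it allocated; otherwise the passed-in
 * domain is returned.
 */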
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;
	if (!dev)
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *free = NULL;
	struct intel_iommu *iommu = NULL;
	struct device_domain_info *info;
	struct pci_dev *dev_tmp = NULL;
	unsigned long flags;
	u8 bus, devfn, bridge_bus, bridge_devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		u16 segment;

		segment = pci_domain_nr(pdev->bus);
		dev_tmp = pci_find_upstream_pcie_bridge(pdev);
		if (dev_tmp) {
			if (pci_is_pcie(dev_tmp)) {
				bridge_bus = dev_tmp->subordinate->number;
				bridge_devfn = 0;
			} else {
				bridge_bus = dev_tmp->bus->number;
				bridge_devfn = dev_tmp->devfn;
			}
			spin_lock_irqsave(&device_domain_lock, flags);
			info = dmar_search_domain_by_dev_info(segment,
							      bridge_bus,
							      bridge_devfn);
			if (info) {
				iommu = info->iommu;
				domain = info->domain;
			}
			spin_unlock_irqrestore(&device_domain_lock, flags);
			/* the PCIe-to-PCI bridge already has a domain, use it */
			if (info)
				goto found_domain;
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		goto error;

	/* Allocate and initialize a new domain for the device */
	domain = alloc_domain(false);
	if (!domain)
		goto error;
	if (iommu_attach_domain(domain, iommu)) {
		free_domain_mem(domain);
		domain = NULL;
		goto error;
	}
	free = domain;
	if (domain_init(domain, gaw))
		goto error;

	/* register the PCIe-to-PCI bridge */
	if (dev_tmp) {
		domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
					      NULL, domain);
		if (!domain)
			goto error;
	}

found_domain:
	domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
error:
	if (free != domain)
		domain_exit(free);

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * An RMRR range might overlap with a physical memory range,
	 * so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

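/*
 * si_domain is the single static-identity domain used for passthrough:
 * every usable memory range is mapped 1:1 so DMA addresses equal
 * physical addresses. With hardware passthrough (hw != 0) the page
 * tables are never consulted, so the 1:1 population is skipped.
 */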
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain(false);
	if (!si_domain)
		return -EFAULT;

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

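/*
 * Decide whether @dev should be placed in the static identity (1:1)
 * domain.  @startup distinguishes the boot-time pass, where DMA masks
 * are not yet known, from later run-time checks.
 */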
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		/*
		 * We want to prevent any device associated with an RMRR from
		 * getting placed into the SI Domain. This is done because
		 * problems exist when devices are moved in and out of domains
		 * and their respective RMRR info is lost. We exempt USB devices
		 * from this process due to their usage of RMRRs that are known
		 * to not be needed after BIOS hand-off to OS.
		 */
		if (device_has_rmrr(dev) &&
		    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will; if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}

static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("IOMMU: %s identity mapping for device %s\n",
			hw ? "hardware" : "software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}

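/*
 * Boot-time pass: initialize si_domain and pre-map every PCI device,
 * plus the physical companions of ACPI devices in DRHD scopes, that
 * qualify for identity mapping.
 */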
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}

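/*
 * Bring every DMAR unit from firmware hand-off to a fully programmed
 * state: root/context tables, invalidation machinery, RMRR and ISA
 * identity maps, fault reporting, and finally translation enable.
 */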
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *	allocate root
	 *	initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto free_iommu;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *	for each dev attached to rmrr
	 *	do
	 *		locate drhd for dev, alloc domain for dev
	 *		allocate free domain
	 *		allocate page table entries for rmrr
	 *		if context not allocated for bus
	 *			allocate and init context
	 *			set present in root table for this bus
	 *		init context with domain, translation etc
	 *	endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto free_iommu;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}

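/*
 * Look up (or create) the dmar_domain for @dev and make sure its
 * context entry is programmed.  get_valid_domain_for_dev() below is
 * the fast-path wrapper that skips this work when per-device info
 * already exists.
 */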
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR "Allocating domain for %s failed\n",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR "Domain context map for %s failed\n",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the dev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		else {
			/*
			 * The device is 32-bit DMA only: remove it from
			 * si_domain and fall back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, dev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       dev_name(dev));
			return 0;
		}
	} else {
		/*
		 * In case a 64-bit DMA device was detached from a VM, put
		 * the device back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}

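/*
 * Core of the map_page path: allocate an IOVA range, set read/write
 * permission bits, write the page-table entries, and flush the IOTLB
 * (or write buffer) as required by caching mode.
 */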
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr to (paddr + size) may span a partial page, so map the whole
	 * page.  Note: if two parts of one page are mapped separately, we
	 * might get two guest_addr mappings to the same host paddr, but this
	 * is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}

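/*
 * Deferred-unmap machinery: unmapped IOVA ranges are batched per IOMMU
 * and released here, either from the timer or when the queue hits its
 * high-water mark.  Called with async_umap_flush_lock held.
 */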
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

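/*
 * Tear down the mapping for one page range: look up the IOVA, clear
 * the page tables, then either flush the IOTLB synchronously (strict
 * mode) or queue the range for a batched deferred flush.
 */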
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

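/*
 * Allocate coherent memory, preferring the CMA area for blocking
 * allocations, and map it through __intel_map_single() against the
 * device's coherent DMA mask.
 */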
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}

static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

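/*
 * Drivers never call these ops directly; the generic DMA API dispatches
 * to them.  A minimal sketch of the usual driver-side sequence (the
 * variable names here are illustrative only):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))	(lands in intel_mapping_error)
 *		return -ENOMEM;
 *	... hardware DMAs to/from 'dma' ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */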
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

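/*
 * Mark DRHD units that can be bypassed: units whose device scope is
 * empty, and graphics-only units when dmar_map_gfx is disabled.
 */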
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		if (iommu_enable_translation(iommu))
			return 1;
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

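/*
 * Save each IOMMU's fault-event registers and disable translation on
 * the way into suspend; iommu_resume() below restores them.
 */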
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */

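/*
 * Parsers for the ACPI DMAR table's RMRR and ATSR entries; each entry
 * is copied into a driver-private unit structure together with its
 * device scope.
 */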
3681int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3682{
3683 struct acpi_dmar_reserved_memory *rmrr;
3684 struct dmar_rmrr_unit *rmrru;
3685
3686 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3687 if (!rmrru)
3688 return -ENOMEM;
3689
3690 rmrru->hdr = header;
3691 rmrr = (struct acpi_dmar_reserved_memory *)header;
3692 rmrru->base_address = rmrr->base_address;
3693 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003694 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3695 ((void *)rmrr) + rmrr->header.length,
3696 &rmrru->devices_cnt);
3697 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3698 kfree(rmrru);
3699 return -ENOMEM;
3700 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003701
Jiang Liu2e455282014-02-19 14:07:36 +08003702 list_add(&rmrru->list, &dmar_rmrr_units);
3703
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003704 return 0;
3705}
3706
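/*
 * Parse one ATSR (Address Translation Services Reporting) structure,
 * which declares the PCIe root ports below which devices may use ATS.
 * Bit 0 of the flags (ALL_PORTS) means every root port in this segment
 * qualifies, in which case no device scope follows.
 */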
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003707int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3708{
3709 struct acpi_dmar_atsr *atsr;
3710 struct dmar_atsr_unit *atsru;
3711
3712 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3713 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3714 if (!atsru)
3715 return -ENOMEM;
3716
3717 atsru->hdr = hdr;
3718 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003719 if (!atsru->include_all) {
3720 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3721 (void *)atsr + atsr->header.length,
3722 &atsru->devices_cnt);
3723 if (atsru->devices_cnt && atsru->devices == NULL) {
3724 kfree(atsru);
3725 return -ENOMEM;
3726 }
3727 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003728
Jiang Liu0e242612014-02-19 14:07:34 +08003729 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003730
3731 return 0;
3732}
3733
Jiang Liu9bdc5312014-01-06 14:18:27 +08003734static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3735{
3736 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3737 kfree(atsru);
3738}
3739
3740static void intel_iommu_free_dmars(void)
3741{
3742 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3743 struct dmar_atsr_unit *atsru, *atsr_n;
3744
3745 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3746 list_del(&rmrru->list);
3747 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3748 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003749 }
3750
Jiang Liu9bdc5312014-01-06 14:18:27 +08003751 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3752 list_del(&atsru->list);
3753 intel_iommu_free_atsr(atsru);
3754 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003755}
3756
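/*
 * Walk up from @dev to its PCIe root port and return 1 if some ATSR
 * covers that port (or an ALL_PORTS ATSR exists for the segment), i.e.
 * the device may use ATS; return 0 otherwise.
 */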
3757int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3758{
Jiang Liub683b232014-02-19 14:07:32 +08003759 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003760 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003761 struct pci_dev *bridge = NULL;
3762 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003763 struct acpi_dmar_atsr *atsr;
3764 struct dmar_atsr_unit *atsru;
3765
3766 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003767 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003768 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003769 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003770 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003771 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003772 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003773 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003774 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003775 if (!bridge)
3776 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003777
Jiang Liu0e242612014-02-19 14:07:34 +08003778 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003779 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3780 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3781 if (atsr->segment != pci_domain_nr(dev->bus))
3782 continue;
3783
Jiang Liub683b232014-02-19 14:07:32 +08003784 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003785 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003786 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003787
3788 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003789 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003790 }
Jiang Liub683b232014-02-19 14:07:32 +08003791 ret = 0;
3792out:
Jiang Liu0e242612014-02-19 14:07:34 +08003793 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003794
Jiang Liub683b232014-02-19 14:07:32 +08003795 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003796}
3797
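/*
 * Keep the cached RMRR and ATSR device-scope lists in sync with PCI
 * hotplug: resolve a newly added device against each structure's scope,
 * or drop it again on removal.  Called from the DMAR PCI notification
 * path.
 */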
Jiang Liu59ce0512014-02-19 14:07:35 +08003798int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3799{
3800 int ret = 0;
3801 struct dmar_rmrr_unit *rmrru;
3802 struct dmar_atsr_unit *atsru;
3803 struct acpi_dmar_atsr *atsr;
3804 struct acpi_dmar_reserved_memory *rmrr;
3805
3806 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3807 return 0;
3808
3809 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3810 rmrr = container_of(rmrru->hdr,
3811 struct acpi_dmar_reserved_memory, header);
3812 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3813 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3814 ((void *)rmrr) + rmrr->header.length,
3815 rmrr->segment, rmrru->devices,
3816 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003817			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003818 return ret;
3819 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003820 dmar_remove_dev_scope(info, rmrr->segment,
3821 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003822 }
3823 }
3824
3825 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3826 if (atsru->include_all)
3827 continue;
3828
3829 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3830 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3831 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3832 (void *)atsr + atsr->header.length,
3833 atsr->segment, atsru->devices,
3834 atsru->devices_cnt);
3835 if (ret > 0)
3836 break;
3837			else if (ret < 0)
3838 return ret;
3839 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3840 if (dmar_remove_dev_scope(info, atsr->segment,
3841 atsru->devices, atsru->devices_cnt))
3842 break;
3843 }
3844 }
3845
3846 return 0;
3847}
3848
Fenghua Yu99dcade2009-11-11 07:23:06 -08003849/*
3850 * Here we only respond to a device being unbound from its driver or
3851 * removed from the bus.
3852 *
3853 * A newly added device is not attached to its DMAR domain here yet; that happens when mapping the device to an iova.
3854 */
3855static int device_notifier(struct notifier_block *nb,
3856 unsigned long action, void *data)
3857{
3858 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08003859 struct dmar_domain *domain;
3860
David Woodhouse3d891942014-03-06 15:59:26 +00003861 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003862 return 0;
3863
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003864 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3865 action != BUS_NOTIFY_DEL_DEVICE)
3866 return 0;
3867
David Woodhouse1525a292014-03-06 16:19:30 +00003868 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003869 if (!domain)
3870 return 0;
3871
Jiang Liu3a5670e2014-02-19 14:07:33 +08003872 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003873 domain_remove_one_dev_info(domain, dev);
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003874 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3875 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3876 list_empty(&domain->devices))
3877 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003878 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003879
Fenghua Yu99dcade2009-11-11 07:23:06 -08003880 return 0;
3881}
3882
3883static struct notifier_block device_nb = {
3884 .notifier_call = device_notifier,
3885};
3886
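/*
 * Memory hotplug support for the static identity (si) domain: RAM going
 * online must be added to the identity map, and RAM going offline must
 * be unmapped, with the IOTLBs flushed before the unmapped page-table
 * pages are freed.
 */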
Jiang Liu75f05562014-02-19 14:07:37 +08003887static int intel_iommu_memory_notifier(struct notifier_block *nb,
3888 unsigned long val, void *v)
3889{
3890 struct memory_notify *mhp = v;
3891 unsigned long long start, end;
3892 unsigned long start_vpfn, last_vpfn;
3893
3894 switch (val) {
3895 case MEM_GOING_ONLINE:
3896 start = mhp->start_pfn << PAGE_SHIFT;
3897 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3898 if (iommu_domain_identity_map(si_domain, start, end)) {
3899 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3900 start, end);
3901 return NOTIFY_BAD;
3902 }
3903 break;
3904
3905 case MEM_OFFLINE:
3906 case MEM_CANCEL_ONLINE:
3907 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3908 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3909 while (start_vpfn <= last_vpfn) {
3910 struct iova *iova;
3911 struct dmar_drhd_unit *drhd;
3912 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003913 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08003914
3915 iova = find_iova(&si_domain->iovad, start_vpfn);
3916 if (iova == NULL) {
3917				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3918 start_vpfn);
3919 break;
3920 }
3921
3922 iova = split_and_remove_iova(&si_domain->iovad, iova,
3923 start_vpfn, last_vpfn);
3924 if (iova == NULL) {
3925 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3926 start_vpfn, last_vpfn);
3927 return NOTIFY_BAD;
3928 }
3929
David Woodhouseea8ea462014-03-05 17:09:32 +00003930 freelist = domain_unmap(si_domain, iova->pfn_lo,
3931 iova->pfn_hi);
3932
Jiang Liu75f05562014-02-19 14:07:37 +08003933 rcu_read_lock();
3934 for_each_active_iommu(iommu, drhd)
3935 iommu_flush_iotlb_psi(iommu, si_domain->id,
3936 iova->pfn_lo,
David Woodhouseea8ea462014-03-05 17:09:32 +00003937 iova->pfn_hi - iova->pfn_lo + 1,
3938 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08003939 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00003940 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08003941
3942 start_vpfn = iova->pfn_hi + 1;
3943 free_iova_mem(iova);
3944 }
3945 break;
3946 }
3947
3948 return NOTIFY_OK;
3949}
3950
3951static struct notifier_block intel_iommu_memory_nb = {
3952 .notifier_call = intel_iommu_memory_notifier,
3953 .priority = 0
3954};
3955
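/*
 * Main init entry point (on x86 reached via x86_init.iommu.iommu_init).
 * Rough order of business: set up the mempools, parse the DMAR table,
 * disable any translation the firmware left enabled, build the device
 * scope and reserved IOVA ranges, bring up the DMAR units (init_dmars()),
 * then switch dma_ops over to intel_dma_ops and register the PM hooks,
 * IOMMU API ops and the bus/memory notifiers.
 */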
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003956int __init intel_iommu_init(void)
3957{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003958 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003959 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003960 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003961
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003962 /* VT-d is required for a TXT/tboot launch, so enforce that */
3963 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003964
Jiang Liu3a5670e2014-02-19 14:07:33 +08003965 if (iommu_init_mempool()) {
3966 if (force_on)
3967 panic("tboot: Failed to initialize iommu memory\n");
3968 return -ENOMEM;
3969 }
3970
3971 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003972 if (dmar_table_init()) {
3973 if (force_on)
3974 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003975 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003976 }
3977
Takao Indoh3a93c842013-04-23 17:35:03 +09003978 /*
3979 * Disable translation if already enabled prior to OS handover.
3980 */
Jiang Liu7c919772014-01-06 14:18:18 +08003981 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09003982 if (iommu->gcmd & DMA_GCMD_TE)
3983 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09003984
Suresh Siddhac2c72862011-08-23 17:05:19 -07003985 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003986 if (force_on)
3987 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003988 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003989 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003990
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003991 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08003992 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07003993
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003994 if (list_empty(&dmar_rmrr_units))
3995 printk(KERN_INFO "DMAR: No RMRR found\n");
3996
3997 if (list_empty(&dmar_atsr_units))
3998 printk(KERN_INFO "DMAR: No ATSR found\n");
3999
Joseph Cihula51a63e62011-03-21 11:04:24 -07004000 if (dmar_init_reserved_ranges()) {
4001 if (force_on)
4002 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004003 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004004 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004005
4006 init_no_remapping_devices();
4007
Joseph Cihulab7792602011-05-03 00:08:37 -07004008 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004009 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004010 if (force_on)
4011 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004012 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004013 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004014 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004015 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004016 printk(KERN_INFO
4017 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4018
mark gross5e0d2a62008-03-04 15:22:08 -08004019 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004020#ifdef CONFIG_SWIOTLB
4021 swiotlb = 0;
4022#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004023 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004024
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004025 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004026
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004027 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004028 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004029 if (si_domain && !hw_pass_through)
4030 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004031
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004032 intel_iommu_enabled = 1;
4033
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004034 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004035
4036out_free_reserved_range:
4037 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004038out_free_dmar:
4039 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004040 up_write(&dmar_global_lock);
4041 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004042 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004043}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004044
Han, Weidong3199aa62009-02-26 17:31:12 +08004045static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004046 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004047{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004048 struct pci_dev *tmp, *parent, *pdev;
Han, Weidong3199aa62009-02-26 17:31:12 +08004049
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004050 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004051 return;
4052
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004053 pdev = to_pci_dev(dev);
4054
Han, Weidong3199aa62009-02-26 17:31:12 +08004055 /* dependent device detach */
4056 tmp = pci_find_upstream_pcie_bridge(pdev);
4057 /* Secondary interface's bus number and devfn 0 */
4058 if (tmp) {
4059 parent = pdev->bus->self;
4060 while (parent != tmp) {
4061 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01004062 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08004063 parent = parent->bus->self;
4064 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05004065 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08004066 iommu_detach_dev(iommu,
4067 tmp->subordinate->number, 0);
4068 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01004069 iommu_detach_dev(iommu, tmp->bus->number,
4070 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08004071 }
4072}
4073
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004074static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004075 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004076{
Yijing Wangbca2b912013-10-31 17:26:04 +08004077 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004078 struct intel_iommu *iommu;
4079 unsigned long flags;
4080 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004081 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004082
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004083 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004084 if (!iommu)
4085 return;
4086
4087 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004088 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004089 if (info->iommu == iommu && info->bus == bus &&
4090 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004091 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004092 spin_unlock_irqrestore(&device_domain_lock, flags);
4093
Yu Zhao93a23a72009-05-18 13:51:37 +08004094 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004095 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004096 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004097 free_devinfo_mem(info);
4098
4099 spin_lock_irqsave(&device_domain_lock, flags);
4100
4101 if (found)
4102 break;
4103 else
4104 continue;
4105 }
4106
4107		/* if there are no other devices under the same iommu
4108		 * owned by this domain, clear this iommu in iommu_bmp and
4109		 * update the iommu count and coherency
4110 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004111 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004112 found = 1;
4113 }
4114
Roland Dreier3e7abe22011-07-20 06:22:21 -07004115 spin_unlock_irqrestore(&device_domain_lock, flags);
4116
Weidong Hanc7151a82008-12-08 22:51:37 +08004117 if (found == 0) {
4118 unsigned long tmp_flags;
4119 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08004120 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08004121 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08004122 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08004123 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07004124
Alex Williamson9b4554b2011-05-24 12:19:04 -04004125 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
4126 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
4127 spin_lock_irqsave(&iommu->lock, tmp_flags);
4128 clear_bit(domain->id, iommu->domain_ids);
4129 iommu->domains[domain->id] = NULL;
4130 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
4131 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004132 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004133}
4134
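/*
 * Initialize a dmar_domain created through the generic IOMMU API rather
 * than via the DMA API: seed the IOVA allocator, derive the adjusted
 * address width from @guest_width, and allocate the top-level page
 * directory.
 */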
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004135static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004136{
4137 int adjust_width;
4138
4139 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004140 domain_reserve_special_ranges(domain);
4141
4142 /* calculate AGAW */
4143 domain->gaw = guest_width;
4144 adjust_width = guestwidth_to_adjustwidth(guest_width);
4145 domain->agaw = width_to_agaw(adjust_width);
4146
Weidong Han5e98c4b2008-12-08 23:03:27 +08004147 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004148 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004149 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004150 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004151
4152 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004153 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004154 if (!domain->pgd)
4155 return -ENOMEM;
4156 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4157 return 0;
4158}
4159
Joerg Roedel5d450802008-12-03 14:52:32 +01004160static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004161{
Joerg Roedel5d450802008-12-03 14:52:32 +01004162 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004163
Jiang Liu92d03cc2014-02-19 14:07:28 +08004164 dmar_domain = alloc_domain(true);
Joerg Roedel5d450802008-12-03 14:52:32 +01004165 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004166 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004167 "intel_iommu_domain_init: dmar_domain == NULL\n");
4168 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004169 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004170 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004171 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004172 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004173 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004174 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004175 }
Allen Kay8140a952011-10-14 12:32:17 -07004176 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004177 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004178
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004179 domain->geometry.aperture_start = 0;
4180 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4181 domain->geometry.force_aperture = true;
4182
Joerg Roedel5d450802008-12-03 14:52:32 +01004183 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004184}
Kay, Allen M38717942008-09-09 18:37:29 +03004185
Joerg Roedel5d450802008-12-03 14:52:32 +01004186static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004187{
Joerg Roedel5d450802008-12-03 14:52:32 +01004188 struct dmar_domain *dmar_domain = domain->priv;
4189
4190 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004191 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004192}
Kay, Allen M38717942008-09-09 18:37:29 +03004193
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004194static int intel_iommu_attach_device(struct iommu_domain *domain,
4195 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004196{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004197 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004198 struct intel_iommu *iommu;
4199 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004200 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004201
David Woodhouse7207d8f2014-03-09 16:31:06 -07004202	/* normally dev does not have a context mapping yet */
4203 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004204 struct dmar_domain *old_domain;
4205
David Woodhouse1525a292014-03-06 16:19:30 +00004206 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004207 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004208 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4209 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004210 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004211 else
4212 domain_remove_dev_info(old_domain);
4213 }
4214 }
4215
David Woodhouse156baca2014-03-09 14:00:57 -07004216 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004217 if (!iommu)
4218 return -ENODEV;
4219
4220 /* check if this iommu agaw is sufficient for max mapped address */
4221 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004222 if (addr_width > cap_mgaw(iommu->cap))
4223 addr_width = cap_mgaw(iommu->cap);
4224
4225 if (dmar_domain->max_addr > (1LL << addr_width)) {
4226 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004227 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004228 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004229 return -EFAULT;
4230 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004231 dmar_domain->gaw = addr_width;
4232
4233 /*
4234 * Knock out extra levels of page tables if necessary
4235 */
4236 while (iommu->agaw < dmar_domain->agaw) {
4237 struct dma_pte *pte;
4238
4239 pte = dmar_domain->pgd;
4240 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004241 dmar_domain->pgd = (struct dma_pte *)
4242 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004243 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004244 }
4245 dmar_domain->agaw--;
4246 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004247
David Woodhouse5913c9b2014-03-09 16:27:31 -07004248 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004249}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004250
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004251static void intel_iommu_detach_device(struct iommu_domain *domain,
4252 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004253{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004254 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004255
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004256 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004257}
Kay, Allen M38717942008-09-09 18:37:29 +03004258
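/*
 * iommu_ops->map callback: translate IOMMU_READ/WRITE/CACHE into DMA
 * PTE bits, check the target address against the domain's guest address
 * width, and install the mapping.  The size is rounded up so the low
 * bits of @hpa cannot spill the mapping onto an extra page.
 */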
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004259static int intel_iommu_map(struct iommu_domain *domain,
4260 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004261 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004262{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004263 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004264 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004265 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004266 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004267
Joerg Roedeldde57a22008-12-03 15:04:09 +01004268 if (iommu_prot & IOMMU_READ)
4269 prot |= DMA_PTE_READ;
4270 if (iommu_prot & IOMMU_WRITE)
4271 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004272 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4273 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004274
David Woodhouse163cc522009-06-28 00:51:17 +01004275 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004276 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004277 u64 end;
4278
4279 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004280 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004281 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004282 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004283 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004284 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004285 return -EFAULT;
4286 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004287 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004288 }
David Woodhousead051222009-06-28 14:22:28 +01004289 /* Round up size to next multiple of PAGE_SIZE, if it and
4290 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004291 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004292 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4293 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004294 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004295}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004296
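/*
 * iommu_ops->unmap callback.  domain_unmap() hands back the freed
 * page-table pages on a list rather than freeing them outright, so that
 * dma_free_pagelist() runs only after every IOMMU serving this domain
 * has flushed its IOTLB; otherwise in-flight DMA could still walk the
 * stale tables.
 */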
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004297static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004298 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004299{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004300 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004301 struct page *freelist = NULL;
4302 struct intel_iommu *iommu;
4303 unsigned long start_pfn, last_pfn;
4304 unsigned int npages;
4305 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004306
David Woodhouse5cf0a762014-03-19 16:07:49 +00004307 /* Cope with horrid API which requires us to unmap more than the
4308 size argument if it happens to be a large-page mapping. */
4309 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4310 BUG();
4311
4312 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4313 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4314
David Woodhouseea8ea462014-03-05 17:09:32 +00004315 start_pfn = iova >> VTD_PAGE_SHIFT;
4316 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4317
4318 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4319
4320 npages = last_pfn - start_pfn + 1;
4321
4322 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4323 iommu = g_iommus[iommu_id];
4324
4325 /*
4326		 * find the domain id(s) that dmar_domain occupies on this iommu
4327 */
4328 ndomains = cap_ndoms(iommu->cap);
4329 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4330 if (iommu->domains[num] == dmar_domain)
4331 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4332 npages, !freelist, 0);
4333 }
4334
4335 }
4336
4337 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004338
David Woodhouse163cc522009-06-28 00:51:17 +01004339 if (dmar_domain->max_addr == iova + size)
4340 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004341
David Woodhouse5cf0a762014-03-19 16:07:49 +00004342 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004343}
Kay, Allen M38717942008-09-09 18:37:29 +03004344
Joerg Roedeld14d6572008-12-03 15:06:57 +01004345static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304346 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004347{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004348 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004349 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004350 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004351 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004352
David Woodhouse5cf0a762014-03-19 16:07:49 +00004353 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004354 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004355 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004356
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004357 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004358}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004359
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004360static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4361 unsigned long cap)
4362{
4363 struct dmar_domain *dmar_domain = domain->priv;
4364
4365 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4366 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004367 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004368 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004369
4370 return 0;
4371}
4372
Alex Williamson783f1572012-05-30 14:19:43 -06004373#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4374
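/*
 * Assign @dev to an iommu_group.  The group must contain every function
 * its DMA can be confused with: walk through upstream legacy bridges,
 * quirked DMA sources, non-ACS-isolated sibling functions and
 * non-isolated paths toward the root bus, then join the group of the
 * topmost such device (allocating a fresh group if it has none yet).
 */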
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004375static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004376{
4377 struct pci_dev *pdev = to_pci_dev(dev);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004378 struct pci_dev *bridge, *dma_pdev = NULL;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004379 struct iommu_group *group;
4380 int ret;
David Woodhouse156baca2014-03-09 14:00:57 -07004381 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004382
David Woodhouse156baca2014-03-09 14:00:57 -07004383 if (!device_to_iommu(dev, &bus, &devfn))
Alex Williamson70ae6f02011-10-21 15:56:11 -04004384 return -ENODEV;
4385
4386 bridge = pci_find_upstream_pcie_bridge(pdev);
4387 if (bridge) {
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004388 if (pci_is_pcie(bridge))
4389 dma_pdev = pci_get_domain_bus_and_slot(
4390 pci_domain_nr(pdev->bus),
4391 bridge->subordinate->number, 0);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004392 if (!dma_pdev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004393 dma_pdev = pci_dev_get(bridge);
4394 } else
4395 dma_pdev = pci_dev_get(pdev);
4396
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004397 /* Account for quirked devices */
Alex Williamson783f1572012-05-30 14:19:43 -06004398 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4399
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004400 /*
4401 * If it's a multifunction device that does not support our
Alex Williamsonc14d2692013-05-30 12:39:18 -06004402	 * required ACS flags, add to the same group as the lowest-numbered
Alex Williamsonc14d2692013-05-30 12:39:18 -06004403	 * function that also does not support the required ACS flags.
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004404 */
Alex Williamson783f1572012-05-30 14:19:43 -06004405 if (dma_pdev->multifunction &&
Alex Williamsonc14d2692013-05-30 12:39:18 -06004406 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4407 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4408
4409 for (i = 0; i < 8; i++) {
4410 struct pci_dev *tmp;
4411
4412 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4413 if (!tmp)
4414 continue;
4415
4416 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4417 swap_pci_ref(&dma_pdev, tmp);
4418 break;
4419 }
4420 pci_dev_put(tmp);
4421 }
4422 }
Alex Williamson783f1572012-05-30 14:19:43 -06004423
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004424 /*
4425 * Devices on the root bus go through the iommu. If that's not us,
4426 * find the next upstream device and test ACS up to the root bus.
4427 * Finding the next device may require skipping virtual buses.
4428 */
Alex Williamson783f1572012-05-30 14:19:43 -06004429 while (!pci_is_root_bus(dma_pdev->bus)) {
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004430 struct pci_bus *bus = dma_pdev->bus;
4431
4432 while (!bus->self) {
4433 if (!pci_is_root_bus(bus))
4434 bus = bus->parent;
4435 else
4436 goto root_bus;
4437 }
4438
4439 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
Alex Williamson783f1572012-05-30 14:19:43 -06004440 break;
4441
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004442 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
Alex Williamson70ae6f02011-10-21 15:56:11 -04004443 }
4444
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004445root_bus:
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004446 group = iommu_group_get(&dma_pdev->dev);
4447 pci_dev_put(dma_pdev);
4448 if (!group) {
4449 group = iommu_group_alloc();
4450 if (IS_ERR(group))
4451 return PTR_ERR(group);
4452 }
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004453
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004454 ret = iommu_group_add_device(group, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004455
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004456 iommu_group_put(group);
4457 return ret;
4458}
4459
4460static void intel_iommu_remove_device(struct device *dev)
4461{
4462 iommu_group_remove_device(dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004463}
4464
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004465static struct iommu_ops intel_iommu_ops = {
4466 .domain_init = intel_iommu_domain_init,
4467 .domain_destroy = intel_iommu_domain_destroy,
4468 .attach_dev = intel_iommu_attach_device,
4469 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004470 .map = intel_iommu_map,
4471 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004472 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004473 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004474 .add_device = intel_iommu_add_device,
4475 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004476 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004477};
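/*
 * These callbacks are not called directly; a consumer such as VFIO or
 * KVM device assignment reaches them through the generic IOMMU API.
 * A minimal sketch (illustrative only, error handling elided):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map(dom, iova, paddr, size,
 *			  IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, iova, size);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	iommu_domain_free(dom);
 */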
David Woodhouse9af88142009-02-13 23:18:03 +00004478
Daniel Vetter94526182013-01-20 23:50:13 +01004479static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4480{
4481 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4482 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4483 dmar_map_gfx = 0;
4484}
4485
4486DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4487DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4488DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4489DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4490DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4491DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4492DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4493
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004494static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004495{
4496 /*
4497 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004498 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004499 */
4500 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4501 rwbf_quirk = 1;
4502}
4503
4504DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004505DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4506DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4507DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4508DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004511
Adam Jacksoneecfd572010-08-25 21:17:34 +01004512#define GGC 0x52
4513#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4514#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4515#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4516#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4517#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4518#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4519#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4520#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4521
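/*
 * On Calpella/Ironlake, the host bridge's GGC register says whether the
 * BIOS allocated stolen memory for a VT-d shadow GTT.  Without one the
 * IGD cannot be translated safely, so graphics is left untranslated;
 * with one, batched IOTLB flushing is disabled (intel_iommu_strict)
 * because the gfx device must be idle before a flush.
 */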
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004522static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004523{
4524 unsigned short ggc;
4525
Adam Jacksoneecfd572010-08-25 21:17:34 +01004526 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004527 return;
4528
Adam Jacksoneecfd572010-08-25 21:17:34 +01004529 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004530 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4531 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004532 } else if (dmar_map_gfx) {
4533 /* we have to ensure the gfx device is idle before we flush */
4534 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4535 intel_iommu_strict = 1;
4536 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004537}
4538DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4539DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4540DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4541DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4542
David Woodhousee0fc7e02009-09-30 09:12:17 -07004543/* On Tylersburg chipsets, some BIOSes have been known to enable the
4544 ISOCH DMAR unit for the Azalia sound device, but not give it any
4545 TLB entries, which causes it to deadlock. Check for that. We do
4546 this in a function called from init_dmars(), instead of in a PCI
4547 quirk, because we don't want to print the obnoxious "BIOS broken"
4548 message if VT-d is actually disabled.
4549*/
4550static void __init check_tylersburg_isoch(void)
4551{
4552 struct pci_dev *pdev;
4553 uint32_t vtisochctrl;
4554
4555 /* If there's no Azalia in the system anyway, forget it. */
4556 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4557 if (!pdev)
4558 return;
4559 pci_dev_put(pdev);
4560
4561 /* System Management Registers. Might be hidden, in which case
4562 we can't do the sanity check. But that's OK, because the
4563 known-broken BIOSes _don't_ actually hide it, so far. */
4564 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4565 if (!pdev)
4566 return;
4567
4568 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4569 pci_dev_put(pdev);
4570 return;
4571 }
4572
4573 pci_dev_put(pdev);
4574
4575 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4576 if (vtisochctrl & 1)
4577 return;
4578
4579 /* Drop all bits other than the number of TLB entries */
4580 vtisochctrl &= 0x1c;
4581
4582 /* If we have the recommended number of TLB entries (16), fine. */
4583 if (vtisochctrl == 0x10)
4584 return;
4585
4586 /* Zero TLB entries? You get to ride the short bus to school. */
4587 if (!vtisochctrl) {
4588 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4589 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4590 dmi_get_system_info(DMI_BIOS_VENDOR),
4591 dmi_get_system_info(DMI_BIOS_VERSION),
4592 dmi_get_system_info(DMI_PRODUCT_VERSION));
4593 iommu_identity_mapping |= IDENTMAP_AZALIA;
4594 return;
4595 }
4596
4597	printk(KERN_WARNING "DMAR: The recommended number of TLB entries for the ISOCH unit is 16; your BIOS set %d\n",
4598 vtisochctrl);
4599}