/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

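/*
 * Worked example of the macros above: with gaw == 48, __DOMAIN_MAX_PFN(48)
 * is 2^36 - 1. On 64-bit builds that fits in an unsigned long and is used
 * directly; on 32-bit builds the min_t() clamps DOMAIN_MAX_PFN(48) to
 * ULONG_MAX (2^32 - 1).
 */
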
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

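/*
 * Worked example of the AGAW arithmetic above: a 48-bit address width
 * gives width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2; agaw_to_width(2)
 * maps back to 30 + 2 * 9 = 48 bits, and agaw_to_level(2) selects a
 * 4-level page table.
 */
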
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

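/*
 * Example values for the level helpers above: at level 2,
 * level_to_offset_bits() is 9, so level_size() is 512 VT-d pages (2MiB of
 * IOVA) and pfn_level_offset() selects bits 9-17 of the page frame number
 * as the index into that level's table.
 */
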
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

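/*
 * Example: with 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12) the
 * conversions above are identities; with 64KiB MM pages each MM PFN
 * corresponds to 16 VT-d PFNs, hence the shifts by the difference of the
 * two page shifts.
 */
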
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * Set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT).
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
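
/*
 * With 4KiB pages and 16-byte root entries, ROOT_ENTRY_NR works out to
 * 256: one root entry per possible PCI bus number.
 */
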
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return root_present(root) ?
		(struct context_entry *)phys_to_virt(root->val & VTD_PAGE_MASK) :
		NULL;
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

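/*
 * Illustrative sketch (not a verbatim excerpt from this file) of how the
 * setters above combine to build a context entry: the domain id and
 * address width live in the high word, the page-table root and
 * translation type in the low word, with the present bit set last.
 * CONTEXT_TT_MULTI_LEVEL comes from <linux/intel-iommu.h>:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_present(context);
 */
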
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

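/*
 * Note on the helpers above: dma_pte_present() tests bits 0-1 (read and
 * write permission), so a PTE counts as present if either is set.
 * first_pte_in_page() relies on page tables being 4KiB-aligned: a PTE
 * whose address has no bits set below VTD_PAGE_MASK is the first of the
 * 512 entries in its table page.
 */
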
/*
 * This domain is a static identity mapping domain.
 *	1. It creates a static 1:1 mapping of all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency; /* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage; /* Level of superpages supported:
					    0 == 4KiB (no superpages), 1 == 2MiB,
					    2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of IOMMUs in the system; bounds scans of per-domain iommu bitmaps */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

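/*
 * Example command lines accepted by the parser above (options are
 * comma-separated): "intel_iommu=on", "intel_iommu=on,strict" to enable
 * translation with immediate (unbatched) IOTLB flushes, or
 * "intel_iommu=on,igfx_off,sp_off" to skip the graphics device and
 * disable superpage use.
 */
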
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate the maximum SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate the agaw for each iommu.
 * "SAGAW" may differ across iommus: try a default agaw first, and fall
 * back to a smaller supported agaw on iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

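/*
 * Worked example: DEFAULT_DOMAIN_ADDRESS_WIDTH (48) asks for AGAW 2. If an
 * IOMMU's SAGAW capability field has bit 2 clear but bit 1 set,
 * __iommu_calculate_agaw() falls back to AGAW 1, i.e. a 39-bit, 3-level
 * page table.
 */
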
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domains should never get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	rcu_read_unlock();

	domain->iommu_superpage = fls(mask);
}

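/*
 * Example: the capability field advertises 2MiB superpages in bit 0 and
 * 1GiB superpages in bit 1. If every active IOMMU supports 2MiB but only
 * some support 1GiB, the accumulated mask ends up as 0x1 and fls() yields
 * iommu_superpage == 1, i.e. 2MiB is the largest size usable everywhere.
 */
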
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets the context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

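/*
 * Usage note for pfn_to_dma_pte(): callers that want a PTE at a specific
 * level pass that level in *target_level; passing *target_level == 0 means
 * "walk to whatever mapping exists", stopping early at a superpage or a
 * non-present entry, and the level actually reached is written back to
 * *target_level on return.
 */
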
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct page *freelist = NULL;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

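/*
 * Typical unmap lifecycle using the helpers above: domain_unmap() detaches
 * the page-table pages and chains them through page->freelist, the caller
 * flushes the IOTLB so the hardware can no longer walk them, and only then
 * does dma_free_pagelist() return the pages to the allocator.
 */
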
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

1315static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1316 u64 addr, unsigned mask)
1317{
1318 u16 sid, qdep;
1319 unsigned long flags;
1320 struct device_domain_info *info;
1321
1322 spin_lock_irqsave(&device_domain_lock, flags);
1323 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001324 struct pci_dev *pdev;
1325 if (!info->dev || !dev_is_pci(info->dev))
1326 continue;
1327
1328 pdev = to_pci_dev(info->dev);
1329 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001330 continue;
1331
1332 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001333 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001334 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1335 }
1336 spin_unlock_irqrestore(&device_domain_lock, flags);
1337}
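
/*
 * Worked example (illustrative, made-up numbers): for a device at
 * 02:01.3, info->bus is 0x02 and info->devfn is PCI_DEVFN(1, 3) == 0x0b,
 * so sid = 0x02 << 8 | 0x0b == 0x020b. The ATS queue depth read from the
 * device bounds how many invalidation requests may be outstanding to it.
 */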

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fall back to a domain-selective flush if there is no PSI support
	 * or the size is too big. PSI requires the page count to be a
	 * power of two, with the base address naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
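
/*
 * Worked example (illustrative): flushing pages == 9 gives
 * mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4, so the PSI
 * covers 16 naturally aligned pages -- a superset of the requested
 * range, which is always safe to over-invalidate.
 */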

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware has completed it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware has completed it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/*
	 * TBD: there might be 64K domains;
	 * consider other allocation for future chips
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If caching mode is set, invalid translations are tagged with
	 * domain id 0, so we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
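
/*
 * Sizing example (illustrative): with cap_ndoms() == 256 domain ids,
 * BITS_TO_LONGS(256) == 4 longs on a 64-bit kernel, so domain_ids is a
 * 32-byte bitmap and domains[] holds 256 pointers.
 */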

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i, count;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			count = --domain->iommu_count;
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
			if (count == 0)
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(bool vm)
{
	/* domain ids for virtual machines; never written into context entries */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	domain->iommu_count = 0;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (vm) {
		domain->id = atomic_inc_return(&vm_domid);
		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
	}

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	domain->iommu_count++;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			clear_bit(num, iommu->domain_ids);
			iommu->domains[num] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
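
/*
 * Illustrative numbers (assuming 4KiB pages): the IOAPIC reservation
 * above spans pfns 0xfee00 through 0xfeeff (0xfee00000 >> 12 to
 * 0xfeefffff >> 12), so DMA addresses can never be allocated out of the
 * IOAPIC window, nor out of any PCI MMIO BAR reserved by the loop above.
 */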

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
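
/*
 * Worked examples (illustrative): the adjusted width is the next value
 * of the form 12 + 9 * n, since each page-table level resolves 9 bits
 * on top of a 12-bit page offset. gaw == 39 or 48 is already exact and
 * is returned unchanged; gaw == 40 gives r = (40 - 12) % 9 = 1 and
 * rounds up to 40 + 9 - 1 = 48.
 */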

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
		    test_bit(iommu->seq_id, domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/*
		 * Skip the top levels of the page tables for an iommu whose
		 * agaw is smaller than the default. Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock,
							       flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass-through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware, and ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If the hardware doesn't
	 * cache non-present entries, we only need to flush the write-buffer.
	 * If it _does_ cache non-present entries, then it does so in the
	 * special domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
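
/*
 * Illustrative walk-through (hypothetical device): mapping bus 0x03,
 * devfn PCI_DEVFN(2, 0) == 0x10 selects slot 0x10 in the context table
 * that root_entry[0x03] points to. The entry receives the domain id, the
 * translation type, and the physical address of the domain's page-table
 * root (unless pass-through is used, where ASR is ignored), and only
 * then is it marked present and flushed.
 */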

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	int ret;
	struct pci_dev *pdev, *tmp, *parent;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ret = domain_context_mapping_one(domain, iommu, bus, devfn,
					 translation);
	if (ret || !dev_is_pci(dev))
		return ret;

	/* dependent device mapping */
	pdev = to_pci_dev(dev);
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, iommu,
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain, iommu,
						  tmp->subordinate->number, 0,
						  translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain, iommu,
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
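
/*
 * Illustrative example (hypothetical topology): a conventional PCI
 * device at 05:00.0 behind a PCIe-to-PCI bridge with secondary bus 5 may
 * have its transactions retagged by the bridge, so the code above also
 * programs context entries for every bridge on the path and finally for
 * source-id (bus 5, devfn 0), which such a bridge uses when it takes
 * ownership of a transaction.
 */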

static int domain_context_mapped(struct device *dev)
{
	int ret;
	struct pci_dev *pdev, *tmp, *parent;
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, bus, devfn);
	if (!ret || !dev_is_pci(dev))
		return ret;

	/* dependent device mapping */
	pdev = to_pci_dev(dev);
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
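
/*
 * Worked example (illustrative): a 2-byte buffer at host_addr == 0x1fff
 * has an in-page offset of 0xfff, and PAGE_ALIGN(0xfff + 2) >> 12 == 2:
 * it straddles a page boundary and needs two 4KiB VT-d pages, while the
 * same buffer at offset 0 would need only one.
 */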

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/*
	 * To use a large page, the virtual *and* physical addresses must be
	 * aligned to 2MiB/1GiB/etc. Lower bits set in either of them will
	 * mean we have to use smaller pages. So just merge them and check
	 * both at once.
	 */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
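
/*
 * Worked example (illustrative): with iov_pfn == 0x200, phy_pfn == 0x1400
 * and pages == 1024, pfnmerge == 0x1600 has its low nine bits clear, so
 * one 512-page (2MiB) stride fits and level 2 is returned if the
 * hardware supports it; level 3 would additionally require both pfns
 * aligned to 0x40000 (1GiB) with at least 0x40000 pages remaining.
 */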

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
								phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn,
							 &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/*
				 * Ensure that old small page tables are
				 * removed to make room for the superpage,
				 * if they exist.
				 */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/*
		 * We don't need a lock here; nobody else touches this
		 * iova range.
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;

			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/*
		 * If the next PTE would be the first in a new page, then we
		 * need to flush the cache on the entries we've just written.
		 * And then we'll need to recalculate 'pte', so clear it and
		 * let it get set again in the if (!pte) block above.
		 *
		 * If we're done (!nr_pages) we need to flush the cache too.
		 *
		 * Also if we've been setting superpages, we may need to
		 * recalculate 'pte' and switch back to smaller pages for the
		 * end of the mapping, if the trailing size is not enough to
		 * use another superpage (i.e. sg_res < lvl_pages).
		 */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
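
/*
 * Illustrative walk-through (made-up numbers): mapping 513 pages at a
 * 2MiB-aligned iov_pfn with matching physical alignment writes one
 * level-2 PTE covering 512 pages (clearing any stale small-page tables
 * underneath), then falls back to level 1 for the single trailing page;
 * domain_flush_cache() is batched and runs when a PTE page fills up,
 * when the superpage fallback triggers, or when the mapping completes.
 */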

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags, flags2;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			/*
			 * Clear this iommu in iommu_bmp, update the iommu
			 * count and capabilities.
			 */
			spin_lock_irqsave(&domain->iommu_lock, flags2);
			if (test_and_clear_bit(info->iommu->seq_id,
					       domain->iommu_bmp)) {
				domain->iommu_count--;
				domain_update_iommu_cap(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: the info is stored in struct device->archdata.iommu.
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;
	if (!dev)
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;

		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *free = NULL;
	struct intel_iommu *iommu = NULL;
	struct device_domain_info *info;
	struct pci_dev *dev_tmp = NULL;
	unsigned long flags;
	u8 bus, devfn, bridge_bus, bridge_devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		u16 segment;

		segment = pci_domain_nr(pdev->bus);
		dev_tmp = pci_find_upstream_pcie_bridge(pdev);
		if (dev_tmp) {
			if (pci_is_pcie(dev_tmp)) {
				bridge_bus = dev_tmp->subordinate->number;
				bridge_devfn = 0;
			} else {
				bridge_bus = dev_tmp->bus->number;
				bridge_devfn = dev_tmp->devfn;
			}
			spin_lock_irqsave(&device_domain_lock, flags);
			info = dmar_search_domain_by_dev_info(segment,
							      bridge_bus,
							      bridge_devfn);
			if (info) {
				iommu = info->iommu;
				domain = info->domain;
			}
			spin_unlock_irqrestore(&device_domain_lock, flags);
			/* The PCIe-to-PCI bridge already has a domain; use it. */
			if (info)
				goto found_domain;
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		goto error;

	/* Allocate and initialize a new domain for the device */
	domain = alloc_domain(false);
	if (!domain)
		goto error;
	if (iommu_attach_domain(domain, iommu)) {
		free_domain_mem(domain);
		domain = NULL;
		goto error;
	}
	free = domain;
	if (domain_init(domain, gaw))
		goto error;

	/* register the PCIe-to-PCI bridge */
	if (dev_tmp) {
		domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
					      NULL, domain);
		if (!domain)
			goto error;
	}

found_domain:
	domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
error:
	if (free != domain)
		domain_exit(free);

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * The RMRR range might overlap the physical memory range,
	 * so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
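
/*
 * Worked example (illustrative): an RMRR covering 0xd0000000-0xd00fffff
 * gives first_vpfn == 0xd0000 and last_vpfn == 0xd00ff, i.e. a 256-page
 * 1:1 window that is reserved in the domain's iova allocator and then
 * mapped read/write so the device keeps seeing the addresses the BIOS
 * programmed.
 */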

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/*
	 * For _hardware_ passthrough, don't bother. But for software
	 * passthrough, we do it anyway -- it may indicate a memory range
	 * which is reserved in E820 and thus didn't get set up to start
	 * with in si_domain.
	 */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain(false);
	if (!si_domain)
		return -EFAULT;

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn),
					PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

David Woodhouse3bdb2592014-03-09 16:03:08 -07002526static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002527{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002528
David Woodhouse3bdb2592014-03-09 16:03:08 -07002529 if (dev_is_pci(dev)) {
2530 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002531
David Woodhouse3bdb2592014-03-09 16:03:08 -07002532 /*
2533 * We want to prevent any device associated with an RMRR from
2534 * getting placed into the SI Domain. This is done because
2535 * problems exist when devices are moved in and out of domains
2536 * and their respective RMRR info is lost. We exempt USB devices
2537 * from this process due to their usage of RMRRs that are known
2538 * to not be needed after BIOS hand-off to OS.
2539 */
2540 if (device_has_rmrr(dev) &&
2541 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2542 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002543
David Woodhouse3bdb2592014-03-09 16:03:08 -07002544 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2545 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002546
David Woodhouse3bdb2592014-03-09 16:03:08 -07002547 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2548 return 1;
2549
2550 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2551 return 0;
2552
2553 /*
2554 * We want to start off with all devices in the 1:1 domain, and
2555 * take them out later if we find they can't access all of memory.
2556 *
2557 * However, we can't do this for PCI devices behind bridges,
2558 * because all PCI devices behind the same bridge will end up
2559 * with the same source-id on their transactions.
2560 *
2561 * Practically speaking, we can't change things around for these
2562 * devices at run-time, because we can't be sure there'll be no
2563 * DMA transactions in flight for any of their siblings.
2564 *
2565 * So PCI devices (unless they're on the root bus) as well as
2566 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2567 * the 1:1 domain, just in _case_ one of their siblings turns out
2568 * not to be able to map all of memory.
2569 */
2570 if (!pci_is_pcie(pdev)) {
2571 if (!pci_is_root_bus(pdev->bus))
2572 return 0;
2573 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2574 return 0;
2575 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2576 return 0;
2577 } else {
2578 if (device_has_rmrr(dev))
2579 return 0;
2580 }
David Woodhouse6941af22009-07-04 18:24:27 +01002581
David Woodhouse3dfc8132009-07-04 19:11:08 +01002582 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002583 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002584 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002585 * take them out of the 1:1 domain later.
2586 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002587 if (!startup) {
2588 /*
2589 * If the device's dma_mask is less than the system's memory
2590 * size then this is not a candidate for identity mapping.
2591 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002592 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002593
David Woodhouse3bdb2592014-03-09 16:03:08 -07002594 if (dev->coherent_dma_mask &&
2595 dev->coherent_dma_mask < dma_mask)
2596 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002597
David Woodhouse3bdb2592014-03-09 16:03:08 -07002598 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002599 }
David Woodhouse6941af22009-07-04 18:24:27 +01002600
2601 return 1;
2602}
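
/*
 * To summarise the policy above: devices with RMRRs (other than USB)
 * never join the static identity domain; Azalia and graphics devices
 * join when the corresponding IDENTMAP flag is set; with IDENTMAP_ALL,
 * PCIe endpoints and devices on the PCI root bus qualify, while
 * conventional-PCI devices behind bridges (and PCIe-to-PCI bridges) do
 * not, since siblings behind a bridge share a source-id. Once past
 * startup, a device must additionally have a DMA mask covering all of
 * memory to stay in the 1:1 domain.
 */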
2603
David Woodhousecf04eee2014-03-21 16:49:04 +00002604static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2605{
2606 int ret;
2607
2608 if (!iommu_should_identity_map(dev, 1))
2609 return 0;
2610
2611 ret = domain_add_dev_info(si_domain, dev,
2612 hw ? CONTEXT_TT_PASS_THROUGH :
2613 CONTEXT_TT_MULTI_LEVEL);
2614 if (!ret)
2615 pr_info("IOMMU: %s identity mapping for device %s\n",
2616 hw ? "hardware" : "software", dev_name(dev));
2617 else if (ret == -ENODEV)
2618 /* device not associated with an iommu */
2619 ret = 0;
2620
2621 return ret;
2622}
2623
2624
Matt Kraai071e1372009-08-23 22:30:22 -07002625static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002626{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002627 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002628 struct dmar_drhd_unit *drhd;
2629 struct intel_iommu *iommu;
2630 struct device *dev;
2631 int i;
2632 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002633
David Woodhouse19943b02009-08-04 16:19:20 +01002634 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002635 if (ret)
2636 return -EFAULT;
2637
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002638 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002639 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2640 if (ret)
2641 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002642 }
2643
David Woodhousecf04eee2014-03-21 16:49:04 +00002644 for_each_active_iommu(iommu, drhd)
2645 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2646 struct acpi_device_physical_node *pn;
2647 struct acpi_device *adev;
2648
2649 if (dev->bus != &acpi_bus_type)
2650 continue;
2651
2652			adev = to_acpi_device(dev);
2653 mutex_lock(&adev->physical_node_lock);
2654 list_for_each_entry(pn, &adev->physical_node_list, node) {
2655 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2656 if (ret)
2657 break;
2658 }
2659 mutex_unlock(&adev->physical_node_lock);
2660 if (ret)
2661 return ret;
2662 }
2663
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002664 return 0;
2665}
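
/*
 * Note that the loop above covers more than PCI: ACPI-enumerated
 * devices listed in DRHD scopes are prepared as well, by walking each
 * ACPI device's physical-node companions.
 */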
2666
Joseph Cihulab7792602011-05-03 00:08:37 -07002667static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002668{
2669 struct dmar_drhd_unit *drhd;
2670 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002671 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002672 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002673 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002674
2675 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002676 * for each drhd
2677 * allocate root
2678 * initialize and program root entry to not present
2679 * endfor
2680 */
2681 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002682 /*
2683		 * lock not needed as this is only incremented in the single-
2684		 * threaded kernel __init code path; all other accesses are
2685		 * read-only
2686 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002687 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2688 g_num_of_iommus++;
2689 continue;
2690 }
2691 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2692 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002693 }
2694
Weidong Hand9630fe2008-12-08 11:06:32 +08002695 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2696 GFP_KERNEL);
2697 if (!g_iommus) {
2698 printk(KERN_ERR "Allocating global iommu array failed\n");
2699 ret = -ENOMEM;
2700 goto error;
2701 }
2702
mark gross80b20dd2008-04-18 13:53:58 -07002703 deferred_flush = kzalloc(g_num_of_iommus *
2704 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2705 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002706 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002707 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002708 }
2709
Jiang Liu7c919772014-01-06 14:18:18 +08002710 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002711 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002712
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002713 ret = iommu_init_domains(iommu);
2714 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002715 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002716
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002717 /*
2718 * TBD:
2719 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002720		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002721 */
2722 ret = iommu_alloc_root_entry(iommu);
2723 if (ret) {
2724 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002725 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002726 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002727 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002728 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002729 }
2730
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002731 /*
2732	 * Start from a sane IOMMU hardware state.
2733 */
Jiang Liu7c919772014-01-06 14:18:18 +08002734 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002735 /*
2736 * If the queued invalidation is already initialized by us
2737 * (for example, while enabling interrupt-remapping) then
2738		 * things are already rolling from a sane state.
2739 */
2740 if (iommu->qi)
2741 continue;
2742
2743 /*
2744 * Clear any previous faults.
2745 */
2746 dmar_fault(-1, iommu);
2747 /*
2748 * Disable queued invalidation if supported and already enabled
2749 * before OS handover.
2750 */
2751 dmar_disable_qi(iommu);
2752 }
2753
Jiang Liu7c919772014-01-06 14:18:18 +08002754 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002755 if (dmar_enable_qi(iommu)) {
2756 /*
2757 * Queued Invalidate not enabled, use Register Based
2758 * Invalidate
2759 */
2760 iommu->flush.flush_context = __iommu_flush_context;
2761 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002762 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002763 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002764 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002765 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002766 } else {
2767 iommu->flush.flush_context = qi_flush_context;
2768 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002769 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002770 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002771 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002772 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002773 }
2774 }
2775
David Woodhouse19943b02009-08-04 16:19:20 +01002776 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002777 iommu_identity_mapping |= IDENTMAP_ALL;
2778
Suresh Siddhad3f13812011-08-23 17:05:25 -07002779#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002780 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002781#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002782
2783 check_tylersburg_isoch();
2784
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002785 /*
2786	 * If pass-through is not set or not enabled, set up context entries
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002787	 * for identity mappings for RMRR, GFX and ISA, possibly falling back
2788	 * to static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002789 */
David Woodhouse19943b02009-08-04 16:19:20 +01002790 if (iommu_identity_mapping) {
2791 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2792 if (ret) {
2793 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002794 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002795 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002796 }
David Woodhouse19943b02009-08-04 16:19:20 +01002797 /*
2798 * For each rmrr
2799 * for each dev attached to rmrr
2800 * do
2801 * locate drhd for dev, alloc domain for dev
2802 * allocate free domain
2803 * allocate page table entries for rmrr
2804 * if context not allocated for bus
2805 * allocate and init context
2806 * set present in root table for this bus
2807 * init context with domain, translation etc
2808 * endfor
2809 * endfor
2810 */
2811 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2812 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002813		/* some BIOSes list non-existent devices in the DMAR table. */
2814 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002815 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002816 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002817 if (ret)
2818 printk(KERN_ERR
2819 "IOMMU: mapping reserved region failed\n");
2820 }
2821 }
2822
2823 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002824
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002825 /*
2826 * for each drhd
2827 * enable fault log
2828 * global invalidate context cache
2829 * global invalidate iotlb
2830 * enable translation
2831 */
Jiang Liu7c919772014-01-06 14:18:18 +08002832 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002833 if (drhd->ignored) {
2834 /*
2835 * we always have to disable PMRs or DMA may fail on
2836 * this device
2837 */
2838 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002839 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002840 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002841 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002842
2843 iommu_flush_write_buffer(iommu);
2844
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002845 ret = dmar_set_interrupt(iommu);
2846 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002847 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002848
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002849 iommu_set_root_entry(iommu);
2850
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002851 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002852 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002853
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002854 ret = iommu_enable_translation(iommu);
2855 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002856 goto free_iommu;
David Woodhouseb94996c2009-09-19 15:28:12 -07002857
2858 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002859 }
2860
2861 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002862
2863free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002864 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002865 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002866 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002867free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002868 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002869error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870 return ret;
2871}
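
/*
 * A brief map of what init_dmars() does: count the IOMMUs and allocate
 * g_iommus[] and the deferred-flush tables; set up per-IOMMU domain
 * bookkeeping and root entries; bring each IOMMU to a sane state
 * (clear stale faults, tear down any firmware-enabled queued
 * invalidation) and then choose queued or register-based invalidation;
 * set up the static identity, RMRR and ISA mappings; and finally, per
 * IOMMU, install the fault interrupt and root entry, issue global
 * context and IOTLB invalidations, and enable translation.
 */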
2872
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002873/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002874static struct iova *intel_alloc_iova(struct device *dev,
2875 struct dmar_domain *domain,
2876 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002877{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002878 struct iova *iova = NULL;
2879
David Woodhouse875764d2009-06-28 21:20:51 +01002880 /* Restrict dma_mask to the width that the iommu can handle */
2881 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2882
2883 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002884 /*
2885 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002886 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002887 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002888 */
David Woodhouse875764d2009-06-28 21:20:51 +01002889 iova = alloc_iova(&domain->iovad, nrpages,
2890 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2891 if (iova)
2892 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002893 }
David Woodhouse875764d2009-06-28 21:20:51 +01002894 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2895 if (unlikely(!iova)) {
2896		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002897 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002898 return NULL;
2899 }
2900
2901 return iova;
2902}
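
/*
 * The 32-bit-first preference above is tied to the "forcedac" option
 * (dmar_forcedac): unless DAC is forced, even a device advertising a
 * mask wider than 32 bits is first offered IOVAs below 4GiB, since
 * some devices claim 64-bit (dual address cycle) support but do not
 * handle it correctly.
 */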
2903
David Woodhoused4b709f2014-03-09 16:07:40 -07002904static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002905{
2906 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002907 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002908
David Woodhoused4b709f2014-03-09 16:07:40 -07002909 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002910 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002911 printk(KERN_ERR "Allocating domain for %s failed",
2912 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002913 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002914 }
2915
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002916 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002917 if (unlikely(!domain_context_mapped(dev))) {
2918 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002919 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002920 printk(KERN_ERR "Domain context map for %s failed",
2921 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002922 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002923 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002924 }
2925
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002926 return domain;
2927}
2928
David Woodhoused4b709f2014-03-09 16:07:40 -07002929static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002930{
2931 struct device_domain_info *info;
2932
2933 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002934 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002935 if (likely(info))
2936 return info->domain;
2937
2938 return __get_valid_domain_for_dev(dev);
2939}
2940
David Woodhouse3d891942014-03-06 15:59:26 +00002941static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002942{
David Woodhouse3d891942014-03-06 15:59:26 +00002943 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002944}
2945
David Woodhouseecb509e2014-03-09 16:29:55 -07002946/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002947static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002948{
2949 int found;
2950
David Woodhouse3d891942014-03-06 15:59:26 +00002951 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002952 return 1;
2953
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002954 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002955 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002956
David Woodhouse9b226622014-03-09 14:03:28 -07002957 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002958 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002959 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002960 return 1;
2961 else {
2962 /*
2963			 * A 32-bit DMA device is removed from si_domain and falls
2964			 * back to non-identity mapping.
2965 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002966 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002967 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002968 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002969 return 0;
2970 }
2971 } else {
2972 /*
2973		 * If a 64-bit DMA device has been detached from a VM, the
2974		 * device is put back into si_domain for identity mapping.
2975 */
David Woodhouseecb509e2014-03-09 16:29:55 -07002976 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002977 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07002978 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002979 hw_pass_through ?
2980 CONTEXT_TT_PASS_THROUGH :
2981 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982 if (!ret) {
2983 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002984 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002985 return 1;
2986 }
2987 }
2988 }
2989
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002990 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002991}
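
/*
 * In other words, si_domain membership is re-evaluated lazily on the
 * DMA map path: a device that turns out to be 32-bit only is dropped
 * from the identity domain at its first mapping, while a 64-bit
 * capable device (e.g. one just detached from a VM) is added back.
 */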
2992
David Woodhouse5040a912014-03-09 16:14:00 -07002993static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002994 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002995{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002996 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002997 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002998 struct iova *iova;
2999 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003000 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003001 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003002 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003003
3004 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003005
David Woodhouse5040a912014-03-09 16:14:00 -07003006 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003007 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003008
David Woodhouse5040a912014-03-09 16:14:00 -07003009 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003010 if (!domain)
3011 return 0;
3012
Weidong Han8c11e792008-12-08 15:29:22 +08003013 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003014 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003015
David Woodhouse5040a912014-03-09 16:14:00 -07003016 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003017 if (!iova)
3018 goto error;
3019
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003020 /*
3021	 * Check if DMAR supports zero-length reads on write-only
3022	 * mappings.
3023 */
3024 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003025 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003026 prot |= DMA_PTE_READ;
3027 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3028 prot |= DMA_PTE_WRITE;
3029 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003030	 * The range paddr .. paddr + size might include a partial page, so
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003031	 * we map the whole page. Note: if two parts of one page are mapped
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003032	 * separately, we might have two guest_addrs mapping to the same host
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003033	 * paddr, but this is not a big problem
3034 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003035 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003036 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003037 if (ret)
3038 goto error;
3039
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003040 /* it's a non-present to present mapping. Only flush if caching mode */
3041 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003042 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003043 else
Weidong Han8c11e792008-12-08 15:29:22 +08003044 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003045
David Woodhouse03d6a242009-06-28 15:33:46 +01003046 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3047 start_paddr += paddr & ~PAGE_MASK;
3048 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051 if (iova)
3052 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003053 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003054 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003055 return 0;
3056}
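
/*
 * On the flush choice above: a not-present to present change needs no
 * IOTLB invalidation on real hardware, only a write-buffer flush. In
 * caching mode (typically an emulated IOMMU) not-present entries may
 * be cached, so a page-selective invalidation is issued instead.
 */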
3057
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003058static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3059 unsigned long offset, size_t size,
3060 enum dma_data_direction dir,
3061 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003062{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003063 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003064 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003065}
3066
mark gross5e0d2a62008-03-04 15:22:08 -08003067static void flush_unmaps(void)
3068{
mark gross80b20dd2008-04-18 13:53:58 -07003069 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003070
mark gross5e0d2a62008-03-04 15:22:08 -08003071 timer_on = 0;
3072
3073 /* just flush them all */
3074 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003075 struct intel_iommu *iommu = g_iommus[i];
3076 if (!iommu)
3077 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003078
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003079 if (!deferred_flush[i].next)
3080 continue;
3081
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003082		/* In caching mode, global flushes make emulation expensive */
3083 if (!cap_caching_mode(iommu->cap))
3084 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003085 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003086 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003087 unsigned long mask;
3088 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003089 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003090
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003091 /* On real hardware multiple invalidations are expensive */
3092 if (cap_caching_mode(iommu->cap))
3093 iommu_flush_iotlb_psi(iommu, domain->id,
David Woodhouseea8ea462014-03-05 17:09:32 +00003094 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3095 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003096 else {
3097 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3098 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3099 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3100 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003101 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003102 if (deferred_flush[i].freelist[j])
3103 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003104 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003105 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003106 }
3107
mark gross5e0d2a62008-03-04 15:22:08 -08003108 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003109}
3110
3111static void flush_unmaps_timeout(unsigned long data)
3112{
mark gross80b20dd2008-04-18 13:53:58 -07003113 unsigned long flags;
3114
3115 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003116 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003117 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003118}
3119
David Woodhouseea8ea462014-03-05 17:09:32 +00003120static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003121{
3122 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003123 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003124 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003125
3126 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003127 if (list_size == HIGH_WATER_MARK)
3128 flush_unmaps();
3129
Weidong Han8c11e792008-12-08 15:29:22 +08003130 iommu = domain_get_iommu(dom);
3131 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003132
mark gross80b20dd2008-04-18 13:53:58 -07003133 next = deferred_flush[iommu_id].next;
3134 deferred_flush[iommu_id].domain[next] = dom;
3135 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003136 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003137 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003138
3139 if (!timer_on) {
3140 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3141 timer_on = 1;
3142 }
3143 list_size++;
3144 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3145}
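
/*
 * Design note on the deferred path: unmaps are batched per IOMMU in
 * deferred_flush[] and released in bulk by flush_unmaps(), either when
 * list_size reaches HIGH_WATER_MARK or when the 10ms timer fires.
 * This trades a short window of stale IOTLB entries for far fewer
 * invalidations.
 */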
3146
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003147static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3148 size_t size, enum dma_data_direction dir,
3149 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003150{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003151 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003152 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003153 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003154 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003155 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003156
David Woodhouse73676832009-07-04 14:08:36 +01003157 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003158 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003159
David Woodhouse1525a292014-03-06 16:19:30 +00003160 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003161 BUG_ON(!domain);
3162
Weidong Han8c11e792008-12-08 15:29:22 +08003163 iommu = domain_get_iommu(domain);
3164
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003165 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003166 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3167 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003168 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003169
David Woodhoused794dc92009-06-28 00:27:49 +01003170 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3171 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003172
David Woodhoused794dc92009-06-28 00:27:49 +01003173 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003174 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003175
David Woodhouseea8ea462014-03-05 17:09:32 +00003176 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003177
mark gross5e0d2a62008-03-04 15:22:08 -08003178 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003179 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003180 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003181 /* free iova */
3182 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003183 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003184 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003185 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003186 /*
3187		 * queue up the release of the unmap to save roughly 1/6th of
3188		 * the CPU time used up by the IOTLB flush operation...
3189 */
mark gross5e0d2a62008-03-04 15:22:08 -08003190 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003191}
3192
David Woodhouse5040a912014-03-09 16:14:00 -07003193static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003194 dma_addr_t *dma_handle, gfp_t flags,
3195 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003196{
Akinobu Mita36746432014-06-04 16:06:51 -07003197 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003198 int order;
3199
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003200 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003201 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003202
David Woodhouse5040a912014-03-09 16:14:00 -07003203 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003204 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003205 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3206 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003207 flags |= GFP_DMA;
3208 else
3209 flags |= GFP_DMA32;
3210 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003211
Akinobu Mita36746432014-06-04 16:06:51 -07003212 if (flags & __GFP_WAIT) {
3213 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003214
Akinobu Mita36746432014-06-04 16:06:51 -07003215 page = dma_alloc_from_contiguous(dev, count, order);
3216 if (page && iommu_no_mapping(dev) &&
3217 page_to_phys(page) + size > dev->coherent_dma_mask) {
3218 dma_release_from_contiguous(dev, page, count);
3219 page = NULL;
3220 }
3221 }
3222
3223 if (!page)
3224 page = alloc_pages(flags, order);
3225 if (!page)
3226 return NULL;
3227 memset(page_address(page), 0, size);
3228
3229 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003230 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003231 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003232 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003233 return page_address(page);
3234 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3235 __free_pages(page, order);
3236
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003237 return NULL;
3238}
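
/*
 * On the allocation strategy above: when the caller may sleep
 * (__GFP_WAIT), the buffer is preferably taken from CMA via
 * dma_alloc_from_contiguous(). For identity-mapped devices the pages
 * must already sit below coherent_dma_mask, since no remapping will be
 * applied, so an unsuitable CMA area is released and the plain page
 * allocator is used instead.
 */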
3239
David Woodhouse5040a912014-03-09 16:14:00 -07003240static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003241 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003242{
3243 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003244 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003245
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003246 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247 order = get_order(size);
3248
David Woodhouse5040a912014-03-09 16:14:00 -07003249 intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Akinobu Mita36746432014-06-04 16:06:51 -07003250 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3251 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252}
3253
David Woodhouse5040a912014-03-09 16:14:00 -07003254static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003255 int nelems, enum dma_data_direction dir,
3256 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003257{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003258 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003259 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003260 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003261 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003262 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003263
David Woodhouse5040a912014-03-09 16:14:00 -07003264 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003265 return;
3266
David Woodhouse5040a912014-03-09 16:14:00 -07003267 domain = find_domain(dev);
Weidong Han8c11e792008-12-08 15:29:22 +08003268 BUG_ON(!domain);
3269
3270 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003271
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003272 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003273 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3274 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003275 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003276
David Woodhoused794dc92009-06-28 00:27:49 +01003277 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3278 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003279
David Woodhouseea8ea462014-03-05 17:09:32 +00003280 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003281
David Woodhouseacea0012009-07-14 01:55:11 +01003282 if (intel_iommu_strict) {
3283 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003284 last_pfn - start_pfn + 1, !freelist, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003285 /* free iova */
3286 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003287 dma_free_pagelist(freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003288 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003289 add_unmap(domain, iova, freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003290 /*
3291		 * queue up the release of the unmap to save roughly 1/6th of
3292		 * the CPU time used up by the IOTLB flush operation...
3293 */
3294 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003295}
3296
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003297static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003298 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003299{
3300 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003301 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003303 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003304 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003305 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003306 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003307 }
3308 return nelems;
3309}
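
/*
 * For identity-mapped devices the scatterlist needs no IOMMU work at
 * all: each segment's bus address is simply the physical address of
 * its page plus the offset, as set up above.
 */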
3310
David Woodhouse5040a912014-03-09 16:14:00 -07003311static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003312 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003315 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003316 size_t size = 0;
3317 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003318 struct iova *iova = NULL;
3319 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003320 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003321 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003322 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323
3324 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003325 if (iommu_no_mapping(dev))
3326 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327
David Woodhouse5040a912014-03-09 16:14:00 -07003328 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003329 if (!domain)
3330 return 0;
3331
Weidong Han8c11e792008-12-08 15:29:22 +08003332 iommu = domain_get_iommu(domain);
3333
David Woodhouseb536d242009-06-28 14:49:31 +01003334 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003335 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003336
David Woodhouse5040a912014-03-09 16:14:00 -07003337 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3338 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003339 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003340 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003341 return 0;
3342 }
3343
3344 /*
3345	 * Check if DMAR supports zero-length reads on write-only
3346	 * mappings.
3347 */
3348 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003349 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003350 prot |= DMA_PTE_READ;
3351 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3352 prot |= DMA_PTE_WRITE;
3353
David Woodhouseb536d242009-06-28 14:49:31 +01003354 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003355
Fenghua Yuf5329592009-08-04 15:09:37 -07003356 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003357 if (unlikely(ret)) {
3358 /* clear the page */
3359 dma_pte_clear_range(domain, start_vpfn,
3360 start_vpfn + size - 1);
3361 /* free page tables */
3362 dma_pte_free_pagetable(domain, start_vpfn,
3363 start_vpfn + size - 1);
3364 /* free iova */
3365 __free_iova(&domain->iovad, iova);
3366 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003367 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003368
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003369 /* it's a non-present to present mapping. Only flush if caching mode */
3370 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003371 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003372 else
Weidong Han8c11e792008-12-08 15:29:22 +08003373 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003374
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003375 return nelems;
3376}
3377
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003378static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3379{
3380 return !dma_addr;
3381}
3382
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003383struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003384 .alloc = intel_alloc_coherent,
3385 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003386 .map_sg = intel_map_sg,
3387 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003388 .map_page = intel_map_page,
3389 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003390 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003391};
3392
3393static inline int iommu_domain_cache_init(void)
3394{
3395 int ret = 0;
3396
3397 iommu_domain_cache = kmem_cache_create("iommu_domain",
3398 sizeof(struct dmar_domain),
3399 0,
3400 SLAB_HWCACHE_ALIGN,
3402					 NULL);
3403 if (!iommu_domain_cache) {
3404 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3405 ret = -ENOMEM;
3406 }
3407
3408 return ret;
3409}
3410
3411static inline int iommu_devinfo_cache_init(void)
3412{
3413 int ret = 0;
3414
3415 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3416 sizeof(struct device_domain_info),
3417 0,
3418 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003419 NULL);
3420 if (!iommu_devinfo_cache) {
3421 printk(KERN_ERR "Couldn't create devinfo cache\n");
3422 ret = -ENOMEM;
3423 }
3424
3425 return ret;
3426}
3427
3428static inline int iommu_iova_cache_init(void)
3429{
3430 int ret = 0;
3431
3432 iommu_iova_cache = kmem_cache_create("iommu_iova",
3433 sizeof(struct iova),
3434 0,
3435 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003436 NULL);
3437 if (!iommu_iova_cache) {
3438 printk(KERN_ERR "Couldn't create iova cache\n");
3439 ret = -ENOMEM;
3440 }
3441
3442 return ret;
3443}
3444
3445static int __init iommu_init_mempool(void)
3446{
3447 int ret;
3448 ret = iommu_iova_cache_init();
3449 if (ret)
3450 return ret;
3451
3452 ret = iommu_domain_cache_init();
3453 if (ret)
3454 goto domain_error;
3455
3456 ret = iommu_devinfo_cache_init();
3457 if (!ret)
3458 return ret;
3459
3460 kmem_cache_destroy(iommu_domain_cache);
3461domain_error:
3462 kmem_cache_destroy(iommu_iova_cache);
3463
3464 return -ENOMEM;
3465}
3466
3467static void __init iommu_exit_mempool(void)
3468{
3469 kmem_cache_destroy(iommu_devinfo_cache);
3470 kmem_cache_destroy(iommu_domain_cache);
3471 kmem_cache_destroy(iommu_iova_cache);
3472
3473}
3474
Dan Williams556ab452010-07-23 15:47:56 -07003475static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3476{
3477 struct dmar_drhd_unit *drhd;
3478 u32 vtbar;
3479 int rc;
3480
3481 /* We know that this device on this chipset has its own IOMMU.
3482 * If we find it under a different IOMMU, then the BIOS is lying
3483 * to us. Hope that the IOMMU for this device is actually
3484 * disabled, and it needs no translation...
3485 */
3486 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3487 if (rc) {
3488 /* "can't" happen */
3489 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3490 return;
3491 }
3492 vtbar &= 0xffff0000;
3493
3494	/* we know that this iommu should be at offset 0xa000 from the vtbar */
3495 drhd = dmar_find_matched_drhd_unit(pdev);
3496 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3497 TAINT_FIRMWARE_WORKAROUND,
3498 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3499 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3500}
3501DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3502
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003503static void __init init_no_remapping_devices(void)
3504{
3505 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003506 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003507 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003508
3509 for_each_drhd_unit(drhd) {
3510 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003511 for_each_active_dev_scope(drhd->devices,
3512 drhd->devices_cnt, i, dev)
3513 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003514 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515 if (i == drhd->devices_cnt)
3516 drhd->ignored = 1;
3517 }
3518 }
3519
Jiang Liu7c919772014-01-06 14:18:18 +08003520 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003521 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003522 continue;
3523
Jiang Liub683b232014-02-19 14:07:32 +08003524 for_each_active_dev_scope(drhd->devices,
3525 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003526 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003527 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003528 if (i < drhd->devices_cnt)
3529 continue;
3530
David Woodhousec0771df2011-10-14 20:59:46 +01003531 /* This IOMMU has *only* gfx devices. Either bypass it or
3532 set the gfx_mapped flag, as appropriate */
3533 if (dmar_map_gfx) {
3534 intel_iommu_gfx_mapped = 1;
3535 } else {
3536 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003537 for_each_active_dev_scope(drhd->devices,
3538 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003539 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003540 }
3541 }
3542}
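
/*
 * The net effect of the above: a DRHD unit whose scope contains no
 * devices, or only graphics devices while dmar_map_gfx is off, is
 * marked ignored, and in the graphics case its devices get
 * DUMMY_DEVICE_DOMAIN_INFO so the DMA API bypasses translation for
 * them entirely.
 */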
3543
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003544#ifdef CONFIG_SUSPEND
3545static int init_iommu_hw(void)
3546{
3547 struct dmar_drhd_unit *drhd;
3548 struct intel_iommu *iommu = NULL;
3549
3550 for_each_active_iommu(iommu, drhd)
3551 if (iommu->qi)
3552 dmar_reenable_qi(iommu);
3553
Joseph Cihulab7792602011-05-03 00:08:37 -07003554 for_each_iommu(iommu, drhd) {
3555 if (drhd->ignored) {
3556 /*
3557 * we always have to disable PMRs or DMA may fail on
3558 * this device
3559 */
3560 if (force_on)
3561 iommu_disable_protect_mem_regions(iommu);
3562 continue;
3563 }
3564
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003565 iommu_flush_write_buffer(iommu);
3566
3567 iommu_set_root_entry(iommu);
3568
3569 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003570 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003571 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003572 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003573 if (iommu_enable_translation(iommu))
3574 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003575 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003576 }
3577
3578 return 0;
3579}
3580
3581static void iommu_flush_all(void)
3582{
3583 struct dmar_drhd_unit *drhd;
3584 struct intel_iommu *iommu;
3585
3586 for_each_active_iommu(iommu, drhd) {
3587 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003588 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003589 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003590 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003591 }
3592}
3593
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003594static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003595{
3596 struct dmar_drhd_unit *drhd;
3597 struct intel_iommu *iommu = NULL;
3598 unsigned long flag;
3599
3600 for_each_active_iommu(iommu, drhd) {
3601 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3602 GFP_ATOMIC);
3603 if (!iommu->iommu_state)
3604 goto nomem;
3605 }
3606
3607 iommu_flush_all();
3608
3609 for_each_active_iommu(iommu, drhd) {
3610 iommu_disable_translation(iommu);
3611
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003612 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003613
3614 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3615 readl(iommu->reg + DMAR_FECTL_REG);
3616 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3617 readl(iommu->reg + DMAR_FEDATA_REG);
3618 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3619 readl(iommu->reg + DMAR_FEADDR_REG);
3620 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3621 readl(iommu->reg + DMAR_FEUADDR_REG);
3622
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003623 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003624 }
3625 return 0;
3626
3627nomem:
3628 for_each_active_iommu(iommu, drhd)
3629 kfree(iommu->iommu_state);
3630
3631 return -ENOMEM;
3632}
3633
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003634static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003635{
3636 struct dmar_drhd_unit *drhd;
3637 struct intel_iommu *iommu = NULL;
3638 unsigned long flag;
3639
3640 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003641 if (force_on)
3642 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3643 else
3644 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003645 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003646 }
3647
3648 for_each_active_iommu(iommu, drhd) {
3649
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003650 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651
3652 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3653 iommu->reg + DMAR_FECTL_REG);
3654 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3655 iommu->reg + DMAR_FEDATA_REG);
3656 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3657 iommu->reg + DMAR_FEADDR_REG);
3658 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3659 iommu->reg + DMAR_FEUADDR_REG);
3660
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003661 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003662 }
3663
3664 for_each_active_iommu(iommu, drhd)
3665 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003666}
3667
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003668static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003669 .resume = iommu_resume,
3670 .suspend = iommu_suspend,
3671};
3672
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003673static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003674{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003675 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003676}
3677
3678#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003679static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003680#endif /* CONFIG_PM */
3681
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003682
3683int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3684{
3685 struct acpi_dmar_reserved_memory *rmrr;
3686 struct dmar_rmrr_unit *rmrru;
3687
3688 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3689 if (!rmrru)
3690 return -ENOMEM;
3691
3692 rmrru->hdr = header;
3693 rmrr = (struct acpi_dmar_reserved_memory *)header;
3694 rmrru->base_address = rmrr->base_address;
3695 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003696 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3697 ((void *)rmrr) + rmrr->header.length,
3698 &rmrru->devices_cnt);
3699 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3700 kfree(rmrru);
3701 return -ENOMEM;
3702 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003703
Jiang Liu2e455282014-02-19 14:07:36 +08003704 list_add(&rmrru->list, &dmar_rmrr_units);
3705
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003706 return 0;
3707}
3708
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003709int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3710{
3711 struct acpi_dmar_atsr *atsr;
3712 struct dmar_atsr_unit *atsru;
3713
3714 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3715 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3716 if (!atsru)
3717 return -ENOMEM;
3718
3719 atsru->hdr = hdr;
3720 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003721 if (!atsru->include_all) {
3722 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3723 (void *)atsr + atsr->header.length,
3724 &atsru->devices_cnt);
3725 if (atsru->devices_cnt && atsru->devices == NULL) {
3726 kfree(atsru);
3727 return -ENOMEM;
3728 }
3729 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003730
Jiang Liu0e242612014-02-19 14:07:34 +08003731 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003732
3733 return 0;
3734}
3735
Jiang Liu9bdc5312014-01-06 14:18:27 +08003736static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3737{
3738 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3739 kfree(atsru);
3740}
3741
3742static void intel_iommu_free_dmars(void)
3743{
3744 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3745 struct dmar_atsr_unit *atsru, *atsr_n;
3746
3747 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3748 list_del(&rmrru->list);
3749 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3750 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003751 }
3752
Jiang Liu9bdc5312014-01-06 14:18:27 +08003753 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3754 list_del(&atsru->list);
3755 intel_iommu_free_atsr(atsru);
3756 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003757}
3758
3759int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3760{
Jiang Liub683b232014-02-19 14:07:32 +08003761 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003762 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003763 struct pci_dev *bridge = NULL;
3764 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003765 struct acpi_dmar_atsr *atsr;
3766 struct dmar_atsr_unit *atsru;
3767
3768 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003769 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003770 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003771 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003772 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003773 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003774 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003775 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003776 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003777 if (!bridge)
3778 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003779
Jiang Liu0e242612014-02-19 14:07:34 +08003780 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003781 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3782 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3783 if (atsr->segment != pci_domain_nr(dev->bus))
3784 continue;
3785
Jiang Liub683b232014-02-19 14:07:32 +08003786 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003787 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003788 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003789
3790 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003791 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003792 }
Jiang Liub683b232014-02-19 14:07:32 +08003793 ret = 0;
3794out:
Jiang Liu0e242612014-02-19 14:07:34 +08003795 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003796
Jiang Liub683b232014-02-19 14:07:32 +08003797 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003798}
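/*
 * Worked example of the matching above: for an endpoint at 0000:40:00.0
 * below root port 0000:00:02.0, the bus walk stops at the root port.  An
 * ATSR for segment 0000 whose device scope lists that root port, or one
 * carrying the INCLUDE_ALL flag, makes this return 1 (ATS may be enabled
 * for the endpoint); with no matching ATSR it returns 0.
 */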
3799
Jiang Liu59ce0512014-02-19 14:07:35 +08003800int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3801{
3802 int ret = 0;
3803 struct dmar_rmrr_unit *rmrru;
3804 struct dmar_atsr_unit *atsru;
3805 struct acpi_dmar_atsr *atsr;
3806 struct acpi_dmar_reserved_memory *rmrr;
3807
3808 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3809 return 0;
3810
3811 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3812 rmrr = container_of(rmrru->hdr,
3813 struct acpi_dmar_reserved_memory, header);
3814 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3815 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3816 ((void *)rmrr) + rmrr->header.length,
3817 rmrr->segment, rmrru->devices,
3818 rmrru->devices_cnt);
3819 if (ret > 0)
3820 break;
 3821 else if (ret < 0)
3822 return ret;
3823 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3824 if (dmar_remove_dev_scope(info, rmrr->segment,
3825 rmrru->devices, rmrru->devices_cnt))
3826 break;
3827 }
3828 }
3829
3830 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3831 if (atsru->include_all)
3832 continue;
3833
3834 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3835 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3836 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3837 (void *)atsr + atsr->header.length,
3838 atsr->segment, atsru->devices,
3839 atsru->devices_cnt);
3840 if (ret > 0)
3841 break;
 3842 else if (ret < 0)
3843 return ret;
3844 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3845 if (dmar_remove_dev_scope(info, atsr->segment,
3846 atsru->devices, atsru->devices_cnt))
3847 break;
3848 }
3849 }
3850
3851 return 0;
3852}
3853
Fenghua Yu99dcade2009-11-11 07:23:06 -08003854/*
 3855 * Here we only respond to the action of a device being unbound from its driver.
3856 *
 3857 * A newly added device is not attached to its DMAR domain here yet; that
 3858 * happens when the device is first mapped to an iova.
3859 */
3860static int device_notifier(struct notifier_block *nb,
3861 unsigned long action, void *data)
3862{
3863 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08003864 struct dmar_domain *domain;
3865
David Woodhouse3d891942014-03-06 15:59:26 +00003866 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003867 return 0;
3868
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003869 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3870 action != BUS_NOTIFY_DEL_DEVICE)
3871 return 0;
3872
David Woodhouse1525a292014-03-06 16:19:30 +00003873 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003874 if (!domain)
3875 return 0;
3876
Jiang Liu3a5670e2014-02-19 14:07:33 +08003877 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003878 domain_remove_one_dev_info(domain, dev);
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003879 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3880 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3881 list_empty(&domain->devices))
3882 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003883 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003884
Fenghua Yu99dcade2009-11-11 07:23:06 -08003885 return 0;
3886}
3887
3888static struct notifier_block device_nb = {
3889 .notifier_call = device_notifier,
3890};
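/*
 * Illustrative sketch (hypothetical my_notifier/my_nb, not part of this
 * driver): the minimal shape of the bus-notifier pattern used above.  The
 * callback filters on the action code and returns 0 (NOTIFY_DONE) for
 * every event it does not care about.
 */
static int my_notifier(struct notifier_block *nb,
                       unsigned long action, void *data)
{
        struct device *dev = data;

        if (action != BUS_NOTIFY_UNBOUND_DRIVER)
                return 0;               /* ignore all other events */

        dev_info(dev, "driver unbound, tearing down per-device state\n");
        return 0;
}

static struct notifier_block my_nb = {
        .notifier_call = my_notifier,
};
/* registered from an init path with:
 *      bus_register_notifier(&pci_bus_type, &my_nb);
 */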
3891
Jiang Liu75f05562014-02-19 14:07:37 +08003892static int intel_iommu_memory_notifier(struct notifier_block *nb,
3893 unsigned long val, void *v)
3894{
3895 struct memory_notify *mhp = v;
3896 unsigned long long start, end;
3897 unsigned long start_vpfn, last_vpfn;
3898
3899 switch (val) {
3900 case MEM_GOING_ONLINE:
3901 start = mhp->start_pfn << PAGE_SHIFT;
3902 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3903 if (iommu_domain_identity_map(si_domain, start, end)) {
3904 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3905 start, end);
3906 return NOTIFY_BAD;
3907 }
3908 break;
3909
3910 case MEM_OFFLINE:
3911 case MEM_CANCEL_ONLINE:
3912 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3913 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3914 while (start_vpfn <= last_vpfn) {
3915 struct iova *iova;
3916 struct dmar_drhd_unit *drhd;
3917 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003918 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08003919
3920 iova = find_iova(&si_domain->iovad, start_vpfn);
3921 if (iova == NULL) {
 3922 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3923 start_vpfn);
3924 break;
3925 }
3926
3927 iova = split_and_remove_iova(&si_domain->iovad, iova,
3928 start_vpfn, last_vpfn);
3929 if (iova == NULL) {
3930 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3931 start_vpfn, last_vpfn);
3932 return NOTIFY_BAD;
3933 }
3934
David Woodhouseea8ea462014-03-05 17:09:32 +00003935 freelist = domain_unmap(si_domain, iova->pfn_lo,
3936 iova->pfn_hi);
3937
Jiang Liu75f05562014-02-19 14:07:37 +08003938 rcu_read_lock();
3939 for_each_active_iommu(iommu, drhd)
3940 iommu_flush_iotlb_psi(iommu, si_domain->id,
3941 iova->pfn_lo,
David Woodhouseea8ea462014-03-05 17:09:32 +00003942 iova->pfn_hi - iova->pfn_lo + 1,
3943 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08003944 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00003945 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08003946
3947 start_vpfn = iova->pfn_hi + 1;
3948 free_iova_mem(iova);
3949 }
3950 break;
3951 }
3952
3953 return NOTIFY_OK;
3954}
3955
3956static struct notifier_block intel_iommu_memory_nb = {
3957 .notifier_call = intel_iommu_memory_notifier,
3958 .priority = 0
3959};
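/*
 * Illustrative sketch (hypothetical prepare_range()/teardown_range()
 * helpers): the memory-hotplug notifier contract relied on above.
 * MEM_GOING_ONLINE runs before the new memory is usable and may veto the
 * hot-add with NOTIFY_BAD; MEM_OFFLINE and MEM_CANCEL_ONLINE are
 * cleanup-only and cannot fail.
 */
static int my_mem_notifier(struct notifier_block *nb,
                           unsigned long val, void *v)
{
        struct memory_notify *mhp = v;

        switch (val) {
        case MEM_GOING_ONLINE:
                /* cover [start_pfn, start_pfn + nr_pages) before use */
                if (prepare_range(mhp->start_pfn, mhp->nr_pages))
                        return NOTIFY_BAD;      /* vetoes the online */
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                teardown_range(mhp->start_pfn, mhp->nr_pages);
                break;
        }

        return NOTIFY_OK;
}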
3960
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003961int __init intel_iommu_init(void)
3962{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003963 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003964 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003965 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003966
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003967 /* VT-d is required for a TXT/tboot launch, so enforce that */
3968 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003969
Jiang Liu3a5670e2014-02-19 14:07:33 +08003970 if (iommu_init_mempool()) {
3971 if (force_on)
3972 panic("tboot: Failed to initialize iommu memory\n");
3973 return -ENOMEM;
3974 }
3975
3976 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003977 if (dmar_table_init()) {
3978 if (force_on)
3979 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003980 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003981 }
3982
Takao Indoh3a93c842013-04-23 17:35:03 +09003983 /*
3984 * Disable translation if already enabled prior to OS handover.
3985 */
Jiang Liu7c919772014-01-06 14:18:18 +08003986 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09003987 if (iommu->gcmd & DMA_GCMD_TE)
3988 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09003989
Suresh Siddhac2c72862011-08-23 17:05:19 -07003990 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003991 if (force_on)
3992 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003993 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003994 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003995
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003996 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08003997 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07003998
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003999 if (list_empty(&dmar_rmrr_units))
4000 printk(KERN_INFO "DMAR: No RMRR found\n");
4001
4002 if (list_empty(&dmar_atsr_units))
4003 printk(KERN_INFO "DMAR: No ATSR found\n");
4004
Joseph Cihula51a63e62011-03-21 11:04:24 -07004005 if (dmar_init_reserved_ranges()) {
4006 if (force_on)
4007 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004008 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004009 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004010
4011 init_no_remapping_devices();
4012
Joseph Cihulab7792602011-05-03 00:08:37 -07004013 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004014 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004015 if (force_on)
4016 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004017 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004018 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004019 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004020 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004021 printk(KERN_INFO
4022 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4023
mark gross5e0d2a62008-03-04 15:22:08 -08004024 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004025#ifdef CONFIG_SWIOTLB
4026 swiotlb = 0;
4027#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004028 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004029
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004030 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004031
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004032 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004033 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004034 if (si_domain && !hw_pass_through)
4035 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004036
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004037 intel_iommu_enabled = 1;
4038
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004039 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004040
4041out_free_reserved_range:
4042 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004043out_free_dmar:
4044 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004045 up_write(&dmar_global_lock);
4046 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004047 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004048}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004049
Han, Weidong3199aa62009-02-26 17:31:12 +08004050static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004051 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004052{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004053 struct pci_dev *tmp, *parent, *pdev;
Han, Weidong3199aa62009-02-26 17:31:12 +08004054
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004055 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004056 return;
4057
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004058 pdev = to_pci_dev(dev);
4059
Han, Weidong3199aa62009-02-26 17:31:12 +08004060 /* dependent device detach */
4061 tmp = pci_find_upstream_pcie_bridge(pdev);
4062 /* Secondary interface's bus number and devfn 0 */
4063 if (tmp) {
4064 parent = pdev->bus->self;
4065 while (parent != tmp) {
4066 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01004067 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08004068 parent = parent->bus->self;
4069 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05004070 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08004071 iommu_detach_dev(iommu,
4072 tmp->subordinate->number, 0);
4073 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01004074 iommu_detach_dev(iommu, tmp->bus->number,
4075 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08004076 }
4077}
4078
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004079static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004080 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004081{
Yijing Wangbca2b912013-10-31 17:26:04 +08004082 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004083 struct intel_iommu *iommu;
4084 unsigned long flags;
4085 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004086 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004087
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004088 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004089 if (!iommu)
4090 return;
4091
4092 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004093 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004094 if (info->iommu == iommu && info->bus == bus &&
4095 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004096 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004097 spin_unlock_irqrestore(&device_domain_lock, flags);
4098
Yu Zhao93a23a72009-05-18 13:51:37 +08004099 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004100 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004101 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004102 free_devinfo_mem(info);
4103
4104 spin_lock_irqsave(&device_domain_lock, flags);
4105
4106 if (found)
4107 break;
4108 else
4109 continue;
4110 }
4111
 4112 /* If there are no other devices under the same iommu
 4113 * owned by this domain, clear this iommu from iommu_bmp
 4114 * and update the iommu count and coherency.
4115 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004116 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004117 found = 1;
4118 }
4119
Roland Dreier3e7abe22011-07-20 06:22:21 -07004120 spin_unlock_irqrestore(&device_domain_lock, flags);
4121
Weidong Hanc7151a82008-12-08 22:51:37 +08004122 if (found == 0) {
4123 unsigned long tmp_flags;
4124 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08004125 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08004126 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08004127 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08004128 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07004129
Alex Williamson9b4554b2011-05-24 12:19:04 -04004130 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
4131 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
4132 spin_lock_irqsave(&iommu->lock, tmp_flags);
4133 clear_bit(domain->id, iommu->domain_ids);
4134 iommu->domains[domain->id] = NULL;
4135 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
4136 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004137 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004138}
4139
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004140static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004141{
4142 int adjust_width;
4143
4144 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004145 domain_reserve_special_ranges(domain);
4146
4147 /* calculate AGAW */
4148 domain->gaw = guest_width;
4149 adjust_width = guestwidth_to_adjustwidth(guest_width);
4150 domain->agaw = width_to_agaw(adjust_width);
4151
Weidong Han5e98c4b2008-12-08 23:03:27 +08004152 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004153 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004154 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004155 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07004156 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004157
4158 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004159 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004160 if (!domain->pgd)
4161 return -ENOMEM;
4162 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4163 return 0;
4164}
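/*
 * Worked example of the AGAW arithmetic above (assuming the usual
 * helpers: guestwidth_to_adjustwidth() rounds the guest width up until
 * (width - 12) is a multiple of the 9-bit level stride, and
 * width_to_agaw(width) is (width - 30) / 9):
 *
 *   guest_width = 48: (48 - 12) % 9 == 0 -> adjust_width = 48,
 *                     agaw = (48 - 30) / 9 = 2 -> 4-level page table
 *   guest_width = 32: (32 - 12) % 9 == 2 -> adjust_width = 39,
 *                     agaw = (39 - 30) / 9 = 1 -> 3-level page table
 */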
4165
Joerg Roedel5d450802008-12-03 14:52:32 +01004166static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004167{
Joerg Roedel5d450802008-12-03 14:52:32 +01004168 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004169
Jiang Liu92d03cc2014-02-19 14:07:28 +08004170 dmar_domain = alloc_domain(true);
Joerg Roedel5d450802008-12-03 14:52:32 +01004171 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004172 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004173 "intel_iommu_domain_init: dmar_domain == NULL\n");
4174 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004175 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004176 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004177 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004178 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004179 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004180 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004181 }
Allen Kay8140a952011-10-14 12:32:17 -07004182 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004183 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004184
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004185 domain->geometry.aperture_start = 0;
4186 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4187 domain->geometry.force_aperture = true;
4188
Joerg Roedel5d450802008-12-03 14:52:32 +01004189 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004190}
Kay, Allen M38717942008-09-09 18:37:29 +03004191
Joerg Roedel5d450802008-12-03 14:52:32 +01004192static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004193{
Joerg Roedel5d450802008-12-03 14:52:32 +01004194 struct dmar_domain *dmar_domain = domain->priv;
4195
4196 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004197 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004198}
Kay, Allen M38717942008-09-09 18:37:29 +03004199
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004200static int intel_iommu_attach_device(struct iommu_domain *domain,
4201 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004202{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004203 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004204 struct intel_iommu *iommu;
4205 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004206 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004207
David Woodhouse7207d8f2014-03-09 16:31:06 -07004208 /* normally dev is not mapped */
4209 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004210 struct dmar_domain *old_domain;
4211
David Woodhouse1525a292014-03-06 16:19:30 +00004212 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004213 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004214 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4215 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004216 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004217 else
4218 domain_remove_dev_info(old_domain);
4219 }
4220 }
4221
David Woodhouse156baca2014-03-09 14:00:57 -07004222 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004223 if (!iommu)
4224 return -ENODEV;
4225
4226 /* check if this iommu agaw is sufficient for max mapped address */
4227 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004228 if (addr_width > cap_mgaw(iommu->cap))
4229 addr_width = cap_mgaw(iommu->cap);
4230
4231 if (dmar_domain->max_addr > (1LL << addr_width)) {
4232 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004233 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004234 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004235 return -EFAULT;
4236 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004237 dmar_domain->gaw = addr_width;
4238
4239 /*
4240 * Knock out extra levels of page tables if necessary
4241 */
4242 while (iommu->agaw < dmar_domain->agaw) {
4243 struct dma_pte *pte;
4244
4245 pte = dmar_domain->pgd;
4246 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004247 dmar_domain->pgd = (struct dma_pte *)
4248 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004249 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004250 }
4251 dmar_domain->agaw--;
4252 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004253
David Woodhouse5913c9b2014-03-09 16:27:31 -07004254 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004255}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004256
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004257static void intel_iommu_detach_device(struct iommu_domain *domain,
4258 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004259{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004260 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004261
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004262 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004263}
Kay, Allen M38717942008-09-09 18:37:29 +03004264
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004265static int intel_iommu_map(struct iommu_domain *domain,
4266 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004267 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004268{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004269 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004270 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004271 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004272 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004273
Joerg Roedeldde57a22008-12-03 15:04:09 +01004274 if (iommu_prot & IOMMU_READ)
4275 prot |= DMA_PTE_READ;
4276 if (iommu_prot & IOMMU_WRITE)
4277 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004278 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4279 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004280
David Woodhouse163cc522009-06-28 00:51:17 +01004281 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004282 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004283 u64 end;
4284
4285 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004286 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004287 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004288 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004289 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004290 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004291 return -EFAULT;
4292 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004293 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004294 }
David Woodhousead051222009-06-28 14:22:28 +01004295 /* Round size up to the next multiple of PAGE_SIZE if it, together with
 4296 the low bits of hpa, would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004297 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004298 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4299 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004300 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004301}
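/*
 * Worked example of the rounding above (assuming aligned_nrpages()
 * computes PAGE_ALIGN((hpa & ~PAGE_MASK) + size) >> VTD_PAGE_SHIFT):
 *
 *   hpa = 0x1f80, size = 0x100:
 *     in-page offset 0xf80 + 0x100 = 0x1080, which page-aligns to 0x2000
 *     -> 2 pages are mapped even though size is only 256 bytes, because
 *     the range straddles a 4KiB page boundary.
 */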
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004302
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004303static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004304 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004305{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004306 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004307 struct page *freelist = NULL;
4308 struct intel_iommu *iommu;
4309 unsigned long start_pfn, last_pfn;
4310 unsigned int npages;
4311 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004312
David Woodhouse5cf0a762014-03-19 16:07:49 +00004313 /* Cope with the horrid API, which requires us to unmap more than the
 4314 size argument if the IOVA happens to fall within a large-page mapping. */
4315 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4316 BUG();
4317
4318 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4319 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4320
David Woodhouseea8ea462014-03-05 17:09:32 +00004321 start_pfn = iova >> VTD_PAGE_SHIFT;
4322 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4323
4324 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4325
4326 npages = last_pfn - start_pfn + 1;
4327
4328 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4329 iommu = g_iommus[iommu_id];
4330
4331 /*
 4332 * find the bit position (domain ID) of dmar_domain on this iommu
4333 */
4334 ndomains = cap_ndoms(iommu->cap);
4335 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4336 if (iommu->domains[num] == dmar_domain)
4337 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4338 npages, !freelist, 0);
4339 }
4340
4341 }
4342
4343 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004344
David Woodhouse163cc522009-06-28 00:51:17 +01004345 if (dmar_domain->max_addr == iova + size)
4346 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004347
David Woodhouse5cf0a762014-03-19 16:07:49 +00004348 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004349}
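/*
 * Worked example of the size bump above (assuming
 * level_to_offset_bits(level) == (level - 1) * 9): a caller asking to
 * unmap 4KiB at an IOVA covered by a 2MiB superpage PTE (level 2) gets
 * size raised to VTD_PAGE_SIZE << 9 == 2MiB; the whole superpage is torn
 * down, and the returned size tells the caller what actually happened.
 */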
Kay, Allen M38717942008-09-09 18:37:29 +03004350
Joerg Roedeld14d6572008-12-03 15:06:57 +01004351static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304352 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004353{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004354 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004355 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004356 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004357 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004358
David Woodhouse5cf0a762014-03-19 16:07:49 +00004359 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004360 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004361 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004362
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004363 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004364}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004365
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004366static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4367 unsigned long cap)
4368{
4369 struct dmar_domain *dmar_domain = domain->priv;
4370
4371 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4372 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004373 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004374 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004375
4376 return 0;
4377}
4378
Alex Williamson783f1572012-05-30 14:19:43 -06004379#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4380
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004381static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004382{
4383 struct pci_dev *pdev = to_pci_dev(dev);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004384 struct pci_dev *bridge, *dma_pdev = NULL;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004385 struct iommu_group *group;
4386 int ret;
David Woodhouse156baca2014-03-09 14:00:57 -07004387 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004388
David Woodhouse156baca2014-03-09 14:00:57 -07004389 if (!device_to_iommu(dev, &bus, &devfn))
Alex Williamson70ae6f02011-10-21 15:56:11 -04004390 return -ENODEV;
4391
4392 bridge = pci_find_upstream_pcie_bridge(pdev);
4393 if (bridge) {
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004394 if (pci_is_pcie(bridge))
4395 dma_pdev = pci_get_domain_bus_and_slot(
4396 pci_domain_nr(pdev->bus),
4397 bridge->subordinate->number, 0);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004398 if (!dma_pdev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004399 dma_pdev = pci_dev_get(bridge);
4400 } else
4401 dma_pdev = pci_dev_get(pdev);
4402
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004403 /* Account for quirked devices */
Alex Williamson783f1572012-05-30 14:19:43 -06004404 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4405
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004406 /*
4407 * If it's a multifunction device that does not support our
Alex Williamsonc14d2692013-05-30 12:39:18 -06004408 * required ACS flags, add it to the same group as the lowest-numbered
 4409 * function that also does not support the required ACS flags.
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004410 */
Alex Williamson783f1572012-05-30 14:19:43 -06004411 if (dma_pdev->multifunction &&
Alex Williamsonc14d2692013-05-30 12:39:18 -06004412 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4413 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4414
4415 for (i = 0; i < 8; i++) {
4416 struct pci_dev *tmp;
4417
4418 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4419 if (!tmp)
4420 continue;
4421
4422 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4423 swap_pci_ref(&dma_pdev, tmp);
4424 break;
4425 }
4426 pci_dev_put(tmp);
4427 }
4428 }
Alex Williamson783f1572012-05-30 14:19:43 -06004429
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004430 /*
4431 * Devices on the root bus go through the iommu. If that's not us,
4432 * find the next upstream device and test ACS up to the root bus.
4433 * Finding the next device may require skipping virtual buses.
4434 */
Alex Williamson783f1572012-05-30 14:19:43 -06004435 while (!pci_is_root_bus(dma_pdev->bus)) {
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004436 struct pci_bus *bus = dma_pdev->bus;
4437
4438 while (!bus->self) {
4439 if (!pci_is_root_bus(bus))
4440 bus = bus->parent;
4441 else
4442 goto root_bus;
4443 }
4444
4445 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
Alex Williamson783f1572012-05-30 14:19:43 -06004446 break;
4447
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004448 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
Alex Williamson70ae6f02011-10-21 15:56:11 -04004449 }
4450
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004451root_bus:
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004452 group = iommu_group_get(&dma_pdev->dev);
4453 pci_dev_put(dma_pdev);
4454 if (!group) {
4455 group = iommu_group_alloc();
4456 if (IS_ERR(group))
4457 return PTR_ERR(group);
4458 }
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004459
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004460 ret = iommu_group_add_device(group, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004461
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004462 iommu_group_put(group);
4463 return ret;
4464}
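/*
 * Worked example of the grouping rules above: take a two-function device
 * 0000:02:00.{0,1} with no ACS capability behind a PCIe switch whose
 * ports also lack ACS.  Function 1 is first aliased to function 0 (the
 * lowest-numbered non-ACS function), and because the path to the root is
 * not ACS-isolated, dma_pdev is then walked up to the switch's upstream
 * port, so both functions and every other device behind that switch end
 * up sharing a single iommu_group.
 */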
4465
4466static void intel_iommu_remove_device(struct device *dev)
4467{
4468 iommu_group_remove_device(dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004469}
4470
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004471static struct iommu_ops intel_iommu_ops = {
4472 .domain_init = intel_iommu_domain_init,
4473 .domain_destroy = intel_iommu_domain_destroy,
4474 .attach_dev = intel_iommu_attach_device,
4475 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004476 .map = intel_iommu_map,
4477 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004478 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004479 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004480 .add_device = intel_iommu_add_device,
4481 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004482 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004483};
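/*
 * Illustrative consumer-side sketch (hypothetical function and buffer;
 * error handling trimmed): the callbacks wired up above are never called
 * directly but are reached through the generic IOMMU API, the way VFIO
 * and KVM drive them.
 */
static int example_domain_use(struct device *dev, void *buf)
{
        struct iommu_domain *dom;
        int ret;

        dom = iommu_domain_alloc(&pci_bus_type);  /* -> intel_iommu_domain_init */
        if (!dom)
                return -ENOMEM;

        ret = iommu_attach_device(dom, dev);      /* -> intel_iommu_attach_device */
        if (ret)
                goto out_free;

        /* map one page of buf at IOVA 0 -> intel_iommu_map */
        ret = iommu_map(dom, 0, virt_to_phys(buf), PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE);
        if (!ret) {
                /* ... device DMA to IOVA 0 happens here ... */
                iommu_unmap(dom, 0, PAGE_SIZE);   /* -> intel_iommu_unmap */
        }

        iommu_detach_device(dom, dev);            /* -> intel_iommu_detach_device */
out_free:
        iommu_domain_free(dom);                   /* -> intel_iommu_domain_destroy */
        return ret;
}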
David Woodhouse9af88142009-02-13 23:18:03 +00004484
Daniel Vetter94526182013-01-20 23:50:13 +01004485static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4486{
4487 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4488 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4489 dmar_map_gfx = 0;
4490}
4491
4492DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4493DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4494DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4495DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4496DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4497DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4498DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4499
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004500static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004501{
4502 /*
4503 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004504 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004505 */
4506 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4507 rwbf_quirk = 1;
4508}
4509
4510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4516DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004517
Adam Jacksoneecfd572010-08-25 21:17:34 +01004518#define GGC 0x52
4519#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4520#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4521#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4522#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4523#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4524#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4525#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4526#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4527
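/*
 * Worked decode of the GGC word above (bits 8-11, per the macros):
 * ggc = 0x0b50 has GGC_MEMORY_VT_ENABLED set (field 0xb ==
 * GGC_MEMORY_SIZE_4M_VT), so the quirk below leaves the IOMMU enabled;
 * ggc = 0x0350 decodes to GGC_MEMORY_SIZE_2M with the VT bit clear,
 * meaning no VT-enabled shadow GTT, so graphics mapping is disabled.
 */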
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004528static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004529{
4530 unsigned short ggc;
4531
Adam Jacksoneecfd572010-08-25 21:17:34 +01004532 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004533 return;
4534
Adam Jacksoneecfd572010-08-25 21:17:34 +01004535 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004536 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4537 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004538 } else if (dmar_map_gfx) {
4539 /* we have to ensure the gfx device is idle before we flush */
4540 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4541 intel_iommu_strict = 1;
4542 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004543}
4544DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4545DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4546DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4547DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4548
David Woodhousee0fc7e02009-09-30 09:12:17 -07004549/* On Tylersburg chipsets, some BIOSes have been known to enable the
4550 ISOCH DMAR unit for the Azalia sound device, but not give it any
4551 TLB entries, which causes it to deadlock. Check for that. We do
4552 this in a function called from init_dmars(), instead of in a PCI
4553 quirk, because we don't want to print the obnoxious "BIOS broken"
4554 message if VT-d is actually disabled.
4555*/
4556static void __init check_tylersburg_isoch(void)
4557{
4558 struct pci_dev *pdev;
4559 uint32_t vtisochctrl;
4560
4561 /* If there's no Azalia in the system anyway, forget it. */
4562 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4563 if (!pdev)
4564 return;
4565 pci_dev_put(pdev);
4566
4567 /* System Management Registers. Might be hidden, in which case
4568 we can't do the sanity check. But that's OK, because the
4569 known-broken BIOSes _don't_ actually hide it, so far. */
4570 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4571 if (!pdev)
4572 return;
4573
4574 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4575 pci_dev_put(pdev);
4576 return;
4577 }
4578
4579 pci_dev_put(pdev);
4580
4581 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4582 if (vtisochctrl & 1)
4583 return;
4584
4585 /* Drop all bits other than the number of TLB entries */
4586 vtisochctrl &= 0x1c;
4587
4588 /* If we have the recommended number of TLB entries (16), fine. */
4589 if (vtisochctrl == 0x10)
4590 return;
4591
4592 /* Zero TLB entries? You get to ride the short bus to school. */
4593 if (!vtisochctrl) {
4594 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4595 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4596 dmi_get_system_info(DMI_BIOS_VENDOR),
4597 dmi_get_system_info(DMI_BIOS_VERSION),
4598 dmi_get_system_info(DMI_PRODUCT_VERSION));
4599 iommu_identity_mapping |= IDENTMAP_AZALIA;
4600 return;
4601 }
4602
4603 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4604 vtisochctrl);
4605}
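/*
 * Worked decode of the check above: bit 0 of vtisochctrl set means
 * Azalia DMA is routed to the non-isoch DMAR unit, which is safe.
 * Otherwise bits 2-4 (mask 0x1c) give the TLB allocation of the isoch
 * unit: 0x10 == 16 entries is the recommended value and passes quietly,
 * while 0 entries with Azalia present fires the WARN and forces identity
 * mapping (IDENTMAP_AZALIA) so the sound device cannot deadlock.
 */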