/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"
#define ROOT_SIZE       VTD_PAGE_SIZE
#define CONTEXT_SIZE    VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}
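/*
 * Worked example (illustrative) of the helpers above: each level adds
 * LEVEL_STRIDE == 9 bits, so a level-1 PTE maps 4KiB, level 2 maps 2MiB
 * and level 3 maps 1GiB.  For pfn 0x12345 at level 2,
 * pfn_level_offset() == (0x12345 >> 9) & 0x1ff == 0x91, and
 * align_to_level(0x12345, 2) rounds up to 0x12400.
 */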

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
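/*
 * Worked example (illustrative): with 4KiB MM pages, PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12 and these conversions are the identity.  With
 * 64KiB MM pages (PAGE_SHIFT == 16), one mm pfn spans 16 dma pfns:
 * mm_to_dma_pfn(1) == 16 and dma_to_mm_pfn(16) == 1.
 */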

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root)?phys_to_virt(
                root->val & VTD_PAGE_MASK) :
                NULL);
}
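/*
 * Worked example (illustrative): a root entry with val == 0x1fe4d7001 is
 * present (bit 0 set) and points at a context table at physical address
 * 0x1fe4d7000; get_context_addr_from_root() returns the kernel virtual
 * address of that 4KiB table, which holds one context_entry per devfn.
 */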

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}
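/*
 * Minimal sketch (illustrative) of how the setters above compose an
 * entry, assuming 'did' and a page-table root 'pgd_phys' have already
 * been computed (this mirrors what domain_context_mapping_one() does
 * later in this file):
 *
 *      context_set_domain_id(context, did);
 *      context_set_address_width(context, iommu->agaw);
 *      context_set_address_root(context, pgd_phys);
 *      context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *      context_set_fault_enable(context);
 *      context_set_present(context);
 */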

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
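/*
 * Worked example (illustrative): page-table pages are 4KiB, so a pte
 * pointer whose low 12 bits are zero is the first of the 512 ptes in its
 * page; the clear/free loops below use this to detect when they have
 * walked off the end of one table page.
 */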

/*
 * This domain is a statically identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED  MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED  64
#endif

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct device *dev;     /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable supported super page\n");
                        intel_iommu_superpage = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
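/*
 * Example usage (illustrative) on the kernel command line:
 *
 *      intel_iommu=on,strict,sp_off
 *
 * enables the IOMMU, disables batched IOTLB flushing and disables
 * superpage support; options are comma-separated and parsed in order.
 */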

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}
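/*
 * Worked example (illustrative): SAGAW is a bitmap of supported widths
 * (bit 1 == 39-bit/3-level, bit 2 == 48-bit/4-level).  For max_gaw == 48
 * the loop starts at agaw == width_to_agaw(48) == 2 and walks down, so
 * hardware reporting sagaw == 0x4 yields agaw 2 (48-bit) while
 * sagaw == 0x2 falls back to agaw 1 (39-bit).
 */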

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int i, found = 0;

        domain->iommu_coherency = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                found = 1;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        if (found)
                return;

        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_coherent(iommu->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        rcu_read_unlock();
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
        int i;

        domain->iommu_snooping = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
        }
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        int mask = 0xf;

        if (!intel_iommu_superpage) {
                domain->iommu_superpage = 0;
                return;
        }

        /* set iommu_superpage to the smallest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                mask &= cap_super_page_val(iommu->cap);
                if (!mask) {
                        break;
                }
        }
        rcu_read_unlock();

        domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);
        domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
        struct pci_dev *ptmp, *pdev = NULL;
        u16 segment;
        int i;

        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
        } else if (ACPI_COMPANION(dev))
                dev = &ACPI_COMPANION(dev)->dev;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (pdev && segment != drhd->segment)
                        continue;

                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, tmp) {
                        if (tmp == dev) {
                                *bus = drhd->devices[i].bus;
                                *devfn = drhd->devices[i].devfn;
                                goto out;
                        }

                        if (!pdev || !dev_is_pci(tmp))
                                continue;

                        ptmp = to_pci_dev(tmp);
                        if (ptmp->subordinate &&
                            ptmp->subordinate->number <= pdev->bus->number &&
                            ptmp->subordinate->busn_res.end >= pdev->bus->number)
                                goto got_pdev;
                }

                if (pdev && drhd->include_all) {
                got_pdev:
                        *bus = pdev->bus->number;
                        *devfn = pdev->devfn;
                        goto out;
                }
        }
        iommu = NULL;
 out:
        rcu_read_unlock();

        return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)
                                alloc_pgtable_page(iommu->node);
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn],
                                    sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int *target_level)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (addr_width < BITS_PER_LONG && pfn >> addr_width)
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (1) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == *target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        } else {
                                dma_pte_addr(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                }
                if (level == 1)
                        break;

                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        if (!*target_level)
                *target_level = level;

        return pte;
}
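/*
 * Minimal sketch (illustrative) of a lookup with the helper above: to
 * find (or build) the 4KiB leaf pte for an iova, a caller does roughly
 *
 *      int level = 1;
 *      struct dma_pte *pte = pfn_to_dma_pte(domain,
 *                                           iova >> VTD_PAGE_SHIFT, &level);
 *
 * Passing *target_level == 0 instead stops at whatever entry is already
 * present (or a superpage), which is what the unmap path wants.
 */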


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (dma_pte_superpage(pte)) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}
938
Alex Williamson3269ee02013-06-15 10:27:19 -0600939static void dma_pte_free_level(struct dmar_domain *domain, int level,
940 struct dma_pte *pte, unsigned long pfn,
941 unsigned long start_pfn, unsigned long last_pfn)
942{
943 pfn = max(start_pfn, pfn);
944 pte = &pte[pfn_level_offset(pfn, level)];
945
946 do {
947 unsigned long level_pfn;
948 struct dma_pte *level_pte;
949
950 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
951 goto next;
952
953 level_pfn = pfn & level_mask(level - 1);
954 level_pte = phys_to_virt(dma_pte_addr(pte));
955
956 if (level > 2)
957 dma_pte_free_level(domain, level - 1, level_pte,
958 level_pfn, start_pfn, last_pfn);
959
960 /* If range covers entire pagetable, free it */
961 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800962 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600963 dma_clear_pte(pte);
964 domain_flush_cache(domain, pte, sizeof(*pte));
965 free_pgtable_page(level_pte);
966 }
967next:
968 pfn += level_size(level);
969 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
970}
971

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}
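/*
 * Illustrative note: the list is threaded through page->freelist, so the
 * page referenced by 'pte' is pushed first and every still-present,
 * non-superpage child is pushed recursively on top of it; no table page
 * is modified beyond that single link field.
 */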

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                        struct dma_pte *pte, unsigned long pfn,
                                        unsigned long start_pfn,
                                        unsigned long last_pfn,
                                        struct page *freelist)
{
        struct dma_pte *first_pte = NULL, *last_pte = NULL;

        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;

                if (!dma_pte_present(pte))
                        goto next;

                level_pfn = pfn & level_mask(level);

                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
                        freelist = dma_pte_clear_level(domain, level - 1,
                                                       phys_to_virt(dma_pte_addr(pte)),
                                                       level_pfn, start_pfn, last_pfn,
                                                       freelist);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);

        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);

        return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
                          unsigned long start_pfn,
                          unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct page *freelist = NULL;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                       domain->pgd, 0, start_pfn, last_pfn, NULL);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
                pgd_page->freelist = freelist;
                freelist = pgd_page;

                domain->pgd = NULL;
        }

        return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}
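/*
 * Typical usage (illustrative) of the pair above: callers detach a range
 * in three steps so the hardware never walks freed pages:
 *
 *      freelist = domain_unmap(domain, start_pfn, last_pfn);
 *      ... flush the IOTLB on every iommu this domain is attached to ...
 *      dma_free_pagelist(freelist);
 */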

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1203
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001204/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001205static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1206 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001207{
1208 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1209 u64 val = 0, val_iva = 0;
1210 unsigned long flag;
1211
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001212 switch (type) {
1213 case DMA_TLB_GLOBAL_FLUSH:
1214 /* global flush doesn't need set IVA_REG */
1215 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1216 break;
1217 case DMA_TLB_DSI_FLUSH:
1218 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1219 break;
1220 case DMA_TLB_PSI_FLUSH:
1221 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001222 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001223 val_iva = size_order | addr;
1224 break;
1225 default:
1226 BUG();
1227 }
1228 /* Note: set drain read/write */
1229#if 0
1230 /*
1231 * This is probably to be super secure.. Looks like we can
1232 * ignore it without any impact.
1233 */
1234 if (cap_read_drain(iommu->cap))
1235 val |= DMA_TLB_READ_DRAIN;
1236#endif
1237 if (cap_write_drain(iommu->cap))
1238 val |= DMA_TLB_WRITE_DRAIN;
1239
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001240 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001241 /* Note: Only uses first TLB reg currently */
1242 if (val_iva)
1243 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1244 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1245
1246 /* Make sure hardware complete it */
1247 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1248 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1249
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001250 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001251
1252 /* check IOTLB invalidation granularity */
1253 if (DMA_TLB_IAIG(val) == 0)
1254 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1255 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1256 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001257 (unsigned long long)DMA_TLB_IIRG(type),
1258 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001259}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
                        u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;

        pdev = to_pci_dev(info->dev);

        if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(pdev))
                return NULL;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info || !dev_is_pci(info->dev))
                return;

        pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !dev_is_pci(info->dev) ||
            !pci_ats_enabled(to_pci_dev(info->dev)))
                return;

        pci_disable_ats(to_pci_dev(info->dev));
}
1314
1315static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1316 u64 addr, unsigned mask)
1317{
1318 u16 sid, qdep;
1319 unsigned long flags;
1320 struct device_domain_info *info;
1321
1322 spin_lock_irqsave(&device_domain_lock, flags);
1323 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001324 struct pci_dev *pdev;
1325 if (!info->dev || !dev_is_pci(info->dev))
1326 continue;
1327
1328 pdev = to_pci_dev(info->dev);
1329 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001330 continue;
1331
1332 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001333 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001334 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1335 }
1336 spin_unlock_irqrestore(&device_domain_lock, flags);
1337}
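/*
 * Illustrative aside (not part of the driver): the source-id built above
 * packs the PCI address as bus[15:8], device[7:3], function[2:0].  A
 * hypothetical example using the standard PCI_DEVFN() macro:
 */
#if 0	/* example only, never built */
static u16 example_sid(void)
{
	/* 0000:03:1c.2  ->  (0x03 << 8) | 0xe2 == 0x03e2 */
	return 0x03 << 8 | PCI_DEVFN(0x1c, 2);
}
#endif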
1338
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001339static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001340 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001341{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001342 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001343 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345 BUG_ON(pages == 0);
1346
David Woodhouseea8ea462014-03-05 17:09:32 +00001347 if (ih)
1348 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001350	 * Fall back to a domain-selective flush if there is no PSI support or
 1351	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001352	 * PSI requires the page count to be a power of two (2^x), and the base
 1353	 * address to be naturally aligned to the size.
1354 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001355 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1356 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001357 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001358 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001359 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001360 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001361
1362 /*
Nadav Amit82653632010-04-01 13:24:40 +03001363	 * In caching mode, changes of pages from non-present to present require
 1364	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001365 */
Nadav Amit82653632010-04-01 13:24:40 +03001366 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001367 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001368}
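/*
 * Illustrative sketch (not part of the driver): the PSI mask used above
 * encodes "invalidate 2^mask pages at a naturally aligned base", so a
 * request that is not a power of two gets rounded up.  The function
 * name below is hypothetical; the macros are the standard kernel ones.
 */
#if 0	/* example only, never built */
static void example_psi_mask(void)
{
	/* flushing 3 pages rounds up to 4 pages, i.e. mask == 2 */
	BUG_ON(ilog2(__roundup_pow_of_two(3)) != 2);
	/* a single page needs mask == 0 */
	BUG_ON(ilog2(__roundup_pow_of_two(1)) != 0);
}
#endif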
1369
mark grossf8bab732008-02-08 04:18:38 -08001370static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1371{
1372 u32 pmen;
1373 unsigned long flags;
1374
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001375 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001376 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1377 pmen &= ~DMA_PMEN_EPM;
1378 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1379
1380 /* wait for the protected region status bit to clear */
1381 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1382 readl, !(pmen & DMA_PMEN_PRS), pmen);
1383
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001384 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001385}
1386
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001387static int iommu_enable_translation(struct intel_iommu *iommu)
1388{
1389 u32 sts;
1390 unsigned long flags;
1391
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001392 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001393 iommu->gcmd |= DMA_GCMD_TE;
1394 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001395
 1396	/* Make sure hardware completes it */
1397 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001398 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001400 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401 return 0;
1402}
1403
1404static int iommu_disable_translation(struct intel_iommu *iommu)
1405{
1406 u32 sts;
1407 unsigned long flag;
1408
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001409 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410 iommu->gcmd &= ~DMA_GCMD_TE;
1411 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1412
 1413	/* Make sure hardware completes it */
1414 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001415 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001417 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418 return 0;
1419}
1420
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422static int iommu_init_domains(struct intel_iommu *iommu)
1423{
1424 unsigned long ndomains;
1425 unsigned long nlongs;
1426
1427 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001428 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1429 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430 nlongs = BITS_TO_LONGS(ndomains);
1431
Donald Dutile94a91b52009-08-20 16:51:34 -04001432 spin_lock_init(&iommu->lock);
1433
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434	/* TBD: there might be 64K domains;
 1435	 * consider another allocation scheme for future chips
1436 */
1437 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1438 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001439 pr_err("IOMMU%d: allocating domain id array failed\n",
1440 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001441 return -ENOMEM;
1442 }
1443 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1444 GFP_KERNEL);
1445 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001446 pr_err("IOMMU%d: allocating domain array failed\n",
1447 iommu->seq_id);
1448 kfree(iommu->domain_ids);
1449 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450 return -ENOMEM;
1451 }
1452
1453 /*
 1454	 * If caching mode is set, then invalid translations are tagged
 1455	 * with domain id 0, so we need to pre-allocate it.
1456 */
1457 if (cap_caching_mode(iommu->cap))
1458 set_bit(0, iommu->domain_ids);
1459 return 0;
1460}
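/*
 * Illustrative only: the domain-id bitmap sizing above is plain
 * bits-to-longs rounding.  The function name below is hypothetical.
 */
#if 0	/* example only, never built */
static void example_domain_bitmap(void)
{
	/* 256 domain ids on a 64-bit kernel -> 4 unsigned longs */
	BUG_ON(BITS_TO_LONGS(256) != 256 / BITS_PER_LONG);
}
#endif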
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461
Jiang Liua868e6b2014-01-06 14:18:20 +08001462static void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001463{
1464 struct dmar_domain *domain;
Jiang Liu5ced12a2014-01-06 14:18:22 +08001465 int i, count;
Weidong Hanc7151a82008-12-08 22:51:37 +08001466 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001467
Donald Dutile94a91b52009-08-20 16:51:34 -04001468 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001469 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001470 /*
1471 * Domain id 0 is reserved for invalid translation
1472 * if hardware supports caching mode.
1473 */
1474 if (cap_caching_mode(iommu->cap) && i == 0)
1475 continue;
1476
Donald Dutile94a91b52009-08-20 16:51:34 -04001477 domain = iommu->domains[i];
1478 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001479
Donald Dutile94a91b52009-08-20 16:51:34 -04001480 spin_lock_irqsave(&domain->iommu_lock, flags);
Jiang Liu5ced12a2014-01-06 14:18:22 +08001481 count = --domain->iommu_count;
1482 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001483 if (count == 0)
1484 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001485 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001486 }
1487
1488 if (iommu->gcmd & DMA_GCMD_TE)
1489 iommu_disable_translation(iommu);
1490
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001491 kfree(iommu->domains);
1492 kfree(iommu->domain_ids);
Jiang Liua868e6b2014-01-06 14:18:20 +08001493 iommu->domains = NULL;
1494 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495
Weidong Hand9630fe2008-12-08 11:06:32 +08001496 g_iommus[iommu->seq_id] = NULL;
1497
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498 /* free context mapping */
1499 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500}
1501
Jiang Liu92d03cc2014-02-19 14:07:28 +08001502static struct dmar_domain *alloc_domain(bool vm)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001504	/* domain id for a virtual machine; it won't be set in a context entry */
1505 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001506 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001507
1508 domain = alloc_domain_mem();
1509 if (!domain)
1510 return NULL;
1511
Suresh Siddha4c923d42009-10-02 11:01:24 -07001512 domain->nid = -1;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001513 domain->iommu_count = 0;
Mike Travis1b198bb2012-03-05 15:05:16 -08001514 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001515 domain->flags = 0;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001516 spin_lock_init(&domain->iommu_lock);
1517 INIT_LIST_HEAD(&domain->devices);
1518 if (vm) {
1519 domain->id = atomic_inc_return(&vm_domid);
1520 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1521 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522
1523 return domain;
1524}
1525
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001526static int iommu_attach_domain(struct dmar_domain *domain,
1527 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001528{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001529 int num;
1530 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531 unsigned long flags;
1532
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001533 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001534
1535 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001536
1537 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1538 if (num >= ndomains) {
1539 spin_unlock_irqrestore(&iommu->lock, flags);
1540 printk(KERN_ERR "IOMMU: no free domain ids\n");
1541 return -ENOMEM;
1542 }
1543
1544 domain->id = num;
Jiang Liu9ebd6822014-02-19 14:07:29 +08001545 domain->iommu_count++;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001546 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001547 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001548 iommu->domains[num] = domain;
1549 spin_unlock_irqrestore(&iommu->lock, flags);
1550
1551 return 0;
1552}
1553
1554static void iommu_detach_domain(struct dmar_domain *domain,
1555 struct intel_iommu *iommu)
1556{
1557 unsigned long flags;
1558 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001559
1560 spin_lock_irqsave(&iommu->lock, flags);
1561 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001562 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001563 if (iommu->domains[num] == domain) {
Jiang Liu92d03cc2014-02-19 14:07:28 +08001564 clear_bit(num, iommu->domain_ids);
1565 iommu->domains[num] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001566 break;
1567 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001568 }
Weidong Han8c11e792008-12-08 15:29:22 +08001569 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001570}
1571
1572static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001573static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574
Joseph Cihula51a63e62011-03-21 11:04:24 -07001575static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001576{
1577 struct pci_dev *pdev = NULL;
1578 struct iova *iova;
1579 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001580
David Millerf6611972008-02-06 01:36:23 -08001581 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582
Mark Gross8a443df2008-03-04 14:59:31 -08001583 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1584 &reserved_rbtree_key);
1585
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001586 /* IOAPIC ranges shouldn't be accessed by DMA */
1587 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1588 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001589 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001590 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001591 return -ENODEV;
1592 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593
1594 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1595 for_each_pci_dev(pdev) {
1596 struct resource *r;
1597
1598 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1599 r = &pdev->resource[i];
1600 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1601 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001602 iova = reserve_iova(&reserved_iova_list,
1603 IOVA_PFN(r->start),
1604 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001605 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001606 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001607 return -ENODEV;
1608 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001609 }
1610 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001611 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001612}
1613
1614static void domain_reserve_special_ranges(struct dmar_domain *domain)
1615{
1616 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1617}
1618
1619static inline int guestwidth_to_adjustwidth(int gaw)
1620{
1621 int agaw;
1622 int r = (gaw - 12) % 9;
1623
1624 if (r == 0)
1625 agaw = gaw;
1626 else
1627 agaw = gaw + 9 - r;
1628 if (agaw > 64)
1629 agaw = 64;
1630 return agaw;
1631}
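/*
 * Worked example (illustrative only): each page-table level resolves 9
 * address bits on top of the 12-bit page offset, so the helper above
 * rounds gaw up to the next 12 + 9*n and caps the result at 64.  The
 * function name below is hypothetical.
 */
#if 0	/* example only, never built */
static void example_adjust_width(void)
{
	BUG_ON(guestwidth_to_adjustwidth(30) != 30);	/* 12 + 9*2 */
	BUG_ON(guestwidth_to_adjustwidth(32) != 39);	/* up to 12 + 9*3 */
	BUG_ON(guestwidth_to_adjustwidth(48) != 48);	/* 12 + 9*4 */
	BUG_ON(guestwidth_to_adjustwidth(60) != 64);	/* 66 capped at 64 */
}
#endif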
1632
1633static int domain_init(struct dmar_domain *domain, int guest_width)
1634{
1635 struct intel_iommu *iommu;
1636 int adjust_width, agaw;
1637 unsigned long sagaw;
1638
David Millerf6611972008-02-06 01:36:23 -08001639 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640 domain_reserve_special_ranges(domain);
1641
1642 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001643 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001644 if (guest_width > cap_mgaw(iommu->cap))
1645 guest_width = cap_mgaw(iommu->cap);
1646 domain->gaw = guest_width;
1647 adjust_width = guestwidth_to_adjustwidth(guest_width);
1648 agaw = width_to_agaw(adjust_width);
1649 sagaw = cap_sagaw(iommu->cap);
1650 if (!test_bit(agaw, &sagaw)) {
1651 /* hardware doesn't support it, choose a bigger one */
1652 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1653 agaw = find_next_bit(&sagaw, 5, agaw);
1654 if (agaw >= 5)
1655 return -ENODEV;
1656 }
1657 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658
Weidong Han8e6040972008-12-08 15:49:06 +08001659 if (ecap_coherent(iommu->ecap))
1660 domain->iommu_coherency = 1;
1661 else
1662 domain->iommu_coherency = 0;
1663
Sheng Yang58c610b2009-03-18 15:33:05 +08001664 if (ecap_sc_support(iommu->ecap))
1665 domain->iommu_snooping = 1;
1666 else
1667 domain->iommu_snooping = 0;
1668
David Woodhouse214e39a2014-03-19 10:38:49 +00001669 if (intel_iommu_superpage)
1670 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1671 else
1672 domain->iommu_superpage = 0;
1673
Suresh Siddha4c923d42009-10-02 11:01:24 -07001674 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001675
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001677 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678 if (!domain->pgd)
1679 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001680 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001681 return 0;
1682}
1683
1684static void domain_exit(struct dmar_domain *domain)
1685{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001686 struct dmar_drhd_unit *drhd;
1687 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001688 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689
 1690	/* Domain 0 is reserved, so don't process it */
1691 if (!domain)
1692 return;
1693
Alex Williamson7b668352011-05-24 12:02:41 +01001694 /* Flush any lazy unmaps that may reference this domain */
1695 if (!intel_iommu_strict)
1696 flush_unmaps_timeout(0);
1697
Jiang Liu92d03cc2014-02-19 14:07:28 +08001698 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001700
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001701 /* destroy iovas */
1702 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001703
David Woodhouseea8ea462014-03-05 17:09:32 +00001704 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705
Jiang Liu92d03cc2014-02-19 14:07:28 +08001706 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001707 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001708 for_each_active_iommu(iommu, drhd)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001709 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1710 test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001711 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001712 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001713
David Woodhouseea8ea462014-03-05 17:09:32 +00001714 dma_free_pagelist(freelist);
1715
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001716 free_domain_mem(domain);
1717}
1718
David Woodhouse64ae8922014-03-09 12:52:30 -07001719static int domain_context_mapping_one(struct dmar_domain *domain,
1720 struct intel_iommu *iommu,
1721 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722{
1723 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001725 struct dma_pte *pgd;
1726 unsigned long num;
1727 unsigned long ndomains;
1728 int id;
1729 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001730 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001731
1732 pr_debug("Set context mapping for %02x:%02x.%d\n",
1733 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001734
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001735 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001736 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1737 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001738
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739 context = device_to_context_entry(iommu, bus, devfn);
1740 if (!context)
1741 return -ENOMEM;
1742 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001743 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001744 spin_unlock_irqrestore(&iommu->lock, flags);
1745 return 0;
1746 }
1747
Weidong Hanea6606b2008-12-08 23:08:15 +08001748 id = domain->id;
1749 pgd = domain->pgd;
1750
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001751 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1752 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001753 int found = 0;
1754
1755 /* find an available domain id for this device in iommu */
1756 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001757 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001758 if (iommu->domains[num] == domain) {
1759 id = num;
1760 found = 1;
1761 break;
1762 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001763 }
1764
1765 if (found == 0) {
1766 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1767 if (num >= ndomains) {
1768 spin_unlock_irqrestore(&iommu->lock, flags);
1769 printk(KERN_ERR "IOMMU: no free domain ids\n");
1770 return -EFAULT;
1771 }
1772
1773 set_bit(num, iommu->domain_ids);
1774 iommu->domains[num] = domain;
1775 id = num;
1776 }
1777
 1778	/* Skip top levels of page tables for
 1779	 * an iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001780 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001781 */
Chris Wright1672af12009-12-02 12:06:34 -08001782 if (translation != CONTEXT_TT_PASS_THROUGH) {
1783 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1784 pgd = phys_to_virt(dma_pte_addr(pgd));
1785 if (!dma_pte_present(pgd)) {
1786 spin_unlock_irqrestore(&iommu->lock, flags);
1787 return -ENOMEM;
1788 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001789 }
1790 }
1791 }
1792
1793 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001794
Yu Zhao93a23a72009-05-18 13:51:37 +08001795 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001796 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001797 translation = info ? CONTEXT_TT_DEV_IOTLB :
1798 CONTEXT_TT_MULTI_LEVEL;
1799 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001800 /*
 1801	 * In pass-through mode, AW must be programmed to indicate the largest
 1802	 * AGAW value supported by hardware, and ASR is ignored by hardware.
1803 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001804 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001805 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001806 else {
1807 context_set_address_root(context, virt_to_phys(pgd));
1808 context_set_address_width(context, iommu->agaw);
1809 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001810
1811 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001812 context_set_fault_enable(context);
1813 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001814 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001815
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001816 /*
1817 * It's a non-present to present mapping. If hardware doesn't cache
 1818	 * non-present entries we only need to flush the write-buffer. If it
 1819	 * _does_ cache non-present entries, then it does so in the special
1820 * domain #0, which we have to flush:
1821 */
1822 if (cap_caching_mode(iommu->cap)) {
1823 iommu->flush.flush_context(iommu, 0,
1824 (((u16)bus) << 8) | devfn,
1825 DMA_CCMD_MASK_NOBIT,
1826 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001827 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001828 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001829 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001830 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001831 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001832 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001833
1834 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001835 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001836 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001837 if (domain->iommu_count == 1)
1838 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001839 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001840 }
1841 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001842 return 0;
1843}
1844
1845static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001846domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1847 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001848{
1849 int ret;
David Woodhousee1f167f2014-03-09 15:24:46 -07001850 struct pci_dev *pdev, *tmp, *parent;
David Woodhouse64ae8922014-03-09 12:52:30 -07001851 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001852 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001853
David Woodhousee1f167f2014-03-09 15:24:46 -07001854 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001855 if (!iommu)
1856 return -ENODEV;
1857
David Woodhouse156baca2014-03-09 14:00:57 -07001858 ret = domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001859 translation);
David Woodhousee1f167f2014-03-09 15:24:46 -07001860 if (ret || !dev_is_pci(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001861 return ret;
1862
1863 /* dependent device mapping */
David Woodhousee1f167f2014-03-09 15:24:46 -07001864 pdev = to_pci_dev(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001865 tmp = pci_find_upstream_pcie_bridge(pdev);
1866 if (!tmp)
1867 return 0;
1868 /* Secondary interface's bus number and devfn 0 */
1869 parent = pdev->bus->self;
1870 while (parent != tmp) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001871 ret = domain_context_mapping_one(domain, iommu,
David Woodhouse276dbf992009-04-04 01:45:37 +01001872 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001873 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874 if (ret)
1875 return ret;
1876 parent = parent->bus->self;
1877 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001878 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
David Woodhouse64ae8922014-03-09 12:52:30 -07001879 return domain_context_mapping_one(domain, iommu,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001880 tmp->subordinate->number, 0,
1881 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001882 else /* this is a legacy PCI bridge */
David Woodhouse64ae8922014-03-09 12:52:30 -07001883 return domain_context_mapping_one(domain, iommu,
David Woodhouse276dbf992009-04-04 01:45:37 +01001884 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001885 tmp->devfn,
1886 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001887}
1888
David Woodhousee1f167f2014-03-09 15:24:46 -07001889static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001890{
1891 int ret;
David Woodhousee1f167f2014-03-09 15:24:46 -07001892 struct pci_dev *pdev, *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001893 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001894 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001895
David Woodhousee1f167f2014-03-09 15:24:46 -07001896 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001897 if (!iommu)
1898 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001899
David Woodhouse156baca2014-03-09 14:00:57 -07001900 ret = device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001901 if (!ret || !dev_is_pci(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902 return ret;
David Woodhousee1f167f2014-03-09 15:24:46 -07001903
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001904 /* dependent device mapping */
David Woodhousee1f167f2014-03-09 15:24:46 -07001905 pdev = to_pci_dev(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906 tmp = pci_find_upstream_pcie_bridge(pdev);
1907 if (!tmp)
1908 return ret;
1909 /* Secondary interface's bus number and devfn 0 */
1910 parent = pdev->bus->self;
1911 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001912 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001913 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914 if (!ret)
1915 return ret;
1916 parent = parent->bus->self;
1917 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001918 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001919 return device_context_mapped(iommu, tmp->subordinate->number,
1920 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001921 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001922 return device_context_mapped(iommu, tmp->bus->number,
1923 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001924}
1925
Fenghua Yuf5329592009-08-04 15:09:37 -07001926/* Returns the number of VT-d pages, rounded up to the MM page size */
1927static inline unsigned long aligned_nrpages(unsigned long host_addr,
1928 size_t size)
1929{
1930 host_addr &= ~PAGE_MASK;
1931 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1932}
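/*
 * Illustrative only: the helper above counts VT-d (4KiB) pages after
 * rounding the span up to the CPU's MM page size.  Assuming 4KiB MM
 * pages; the function name below is hypothetical.
 */
#if 0	/* example only, never built */
static void example_aligned_nrpages(void)
{
	/* a 6KiB buffer starting 1KiB into a page: 0x400 + 0x1800
	 * aligns up to 0x2000, i.e. two 4KiB pages */
	BUG_ON(aligned_nrpages(0x1400, 0x1800) != 2);
}
#endif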
1933
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001934/* Return largest possible superpage level for a given mapping */
1935static inline int hardware_largepage_caps(struct dmar_domain *domain,
1936 unsigned long iov_pfn,
1937 unsigned long phy_pfn,
1938 unsigned long pages)
1939{
1940 int support, level = 1;
1941 unsigned long pfnmerge;
1942
1943 support = domain->iommu_superpage;
1944
1945 /* To use a large page, the virtual *and* physical addresses
1946 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1947 of them will mean we have to use smaller pages. So just
1948 merge them and check both at once. */
1949 pfnmerge = iov_pfn | phy_pfn;
1950
1951 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1952 pages >>= VTD_STRIDE_SHIFT;
1953 if (!pages)
1954 break;
1955 pfnmerge >>= VTD_STRIDE_SHIFT;
1956 level++;
1957 support--;
1958 }
1959 return level;
1960}
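/*
 * Illustrative only: OR-ing the virtual and physical frame numbers lets
 * one alignment test cover both.  With the 9-bit stride, level 2 means
 * 2MiB superpages.  A hypothetical call, assuming the domain supports
 * at least one superpage level:
 */
#if 0	/* example only, never built */
	/* iov_pfn 0x200 and phy_pfn 0x400 both have their low 9 bits
	 * clear, and 512 pages fill exactly one 2MiB superpage, so
	 * this returns level 2 */
	level = hardware_largepage_caps(domain, 0x200, 0x400, 512);
#endif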
1961
David Woodhouse9051aa02009-06-29 12:30:54 +01001962static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1963 struct scatterlist *sg, unsigned long phys_pfn,
1964 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001965{
1966 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001967 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001968 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001969 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001970 unsigned int largepage_lvl = 0;
1971 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001972
1973 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1974
1975 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1976 return -EINVAL;
1977
1978 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1979
David Woodhouse9051aa02009-06-29 12:30:54 +01001980 if (sg)
1981 sg_res = 0;
1982 else {
1983 sg_res = nr_pages + 1;
1984 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1985 }
1986
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001987 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001988 uint64_t tmp;
1989
David Woodhousee1605492009-06-29 11:17:38 +01001990 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001991 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001992 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1993 sg->dma_length = sg->length;
1994 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001995 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001996 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001997
David Woodhousee1605492009-06-29 11:17:38 +01001998 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001999 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2000
David Woodhouse5cf0a762014-03-19 16:07:49 +00002001 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002002 if (!pte)
2003 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002004	/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002005 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002006 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002007 /* Ensure that old small page tables are removed to make room
2008 for superpage, if they exist. */
2009 dma_pte_clear_range(domain, iov_pfn,
2010 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2011 dma_pte_free_pagetable(domain, iov_pfn,
2012 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2013 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002014 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002015 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002016
David Woodhousee1605492009-06-29 11:17:38 +01002017 }
 2018	/* We don't need a lock here; nobody else
 2019	 * touches the iova range
2020 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002021 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002022 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002023 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002024 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2025 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002026 if (dumps) {
2027 dumps--;
2028 debug_dma_dump_mappings(NULL);
2029 }
2030 WARN_ON(1);
2031 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002032
2033 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2034
2035 BUG_ON(nr_pages < lvl_pages);
2036 BUG_ON(sg_res < lvl_pages);
2037
2038 nr_pages -= lvl_pages;
2039 iov_pfn += lvl_pages;
2040 phys_pfn += lvl_pages;
2041 pteval += lvl_pages * VTD_PAGE_SIZE;
2042 sg_res -= lvl_pages;
2043
2044 /* If the next PTE would be the first in a new page, then we
2045 need to flush the cache on the entries we've just written.
2046 And then we'll need to recalculate 'pte', so clear it and
2047 let it get set again in the if (!pte) block above.
2048
2049 If we're done (!nr_pages) we need to flush the cache too.
2050
2051 Also if we've been setting superpages, we may need to
2052 recalculate 'pte' and switch back to smaller pages for the
2053 end of the mapping, if the trailing size is not enough to
2054 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002055 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002056 if (!nr_pages || first_pte_in_page(pte) ||
2057 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002058 domain_flush_cache(domain, first_pte,
2059 (void *)pte - (void *)first_pte);
2060 pte = NULL;
2061 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002062
2063 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002064 sg = sg_next(sg);
2065 }
2066 return 0;
2067}
2068
David Woodhouse9051aa02009-06-29 12:30:54 +01002069static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2070 struct scatterlist *sg, unsigned long nr_pages,
2071 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002072{
David Woodhouse9051aa02009-06-29 12:30:54 +01002073 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2074}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002075
David Woodhouse9051aa02009-06-29 12:30:54 +01002076static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2077 unsigned long phys_pfn, unsigned long nr_pages,
2078 int prot)
2079{
2080 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002081}
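/*
 * Illustrative only: the two wrappers above select the scatterlist or
 * the physically-contiguous path of __domain_mapping().  A hypothetical
 * 1:1 mapping of pfns 0x1000-0x1fff would look like:
 */
#if 0	/* example only, never built */
	ret = domain_pfn_mapping(domain, 0x1000, 0x1000, 0x1000,
				 DMA_PTE_READ | DMA_PTE_WRITE);
#endif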
2082
Weidong Hanc7151a82008-12-08 22:51:37 +08002083static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002084{
Weidong Hanc7151a82008-12-08 22:51:37 +08002085 if (!iommu)
2086 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002087
2088 clear_context_table(iommu, bus, devfn);
2089 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002090 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002091 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002092}
2093
David Woodhouse109b9b02012-05-25 17:43:02 +01002094static inline void unlink_domain_info(struct device_domain_info *info)
2095{
2096 assert_spin_locked(&device_domain_lock);
2097 list_del(&info->link);
2098 list_del(&info->global);
2099 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002100 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002101}
2102
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002103static void domain_remove_dev_info(struct dmar_domain *domain)
2104{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002105 struct device_domain_info *info, *tmp;
Jiang Liu92d03cc2014-02-19 14:07:28 +08002106 unsigned long flags, flags2;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002107
2108 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002109 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002110 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002111 spin_unlock_irqrestore(&device_domain_lock, flags);
2112
Yu Zhao93a23a72009-05-18 13:51:37 +08002113 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002114 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002115
Jiang Liu92d03cc2014-02-19 14:07:28 +08002116 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002117 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002118 /* clear this iommu in iommu_bmp, update iommu count
2119 * and capabilities
2120 */
2121 spin_lock_irqsave(&domain->iommu_lock, flags2);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002122 if (test_and_clear_bit(info->iommu->seq_id,
Jiang Liu92d03cc2014-02-19 14:07:28 +08002123 domain->iommu_bmp)) {
2124 domain->iommu_count--;
2125 domain_update_iommu_cap(domain);
2126 }
2127 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2128 }
2129
2130 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002131 spin_lock_irqsave(&device_domain_lock, flags);
2132 }
2133 spin_unlock_irqrestore(&device_domain_lock, flags);
2134}
2135
2136/*
2137 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002138 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002139 */
David Woodhouse1525a292014-03-06 16:19:30 +00002140static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002141{
2142 struct device_domain_info *info;
2143
2144 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002145 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002146 if (info)
2147 return info->domain;
2148 return NULL;
2149}
2150
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002151static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002152dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2153{
2154 struct device_domain_info *info;
2155
2156 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002157 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002158 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002159 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002160
2161 return NULL;
2162}
2163
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002164static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002165 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002166 struct device *dev,
2167 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002168{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002169 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002170 struct device_domain_info *info;
2171 unsigned long flags;
2172
2173 info = alloc_devinfo_mem();
2174 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002175 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002176
Jiang Liu745f2582014-02-19 14:07:26 +08002177 info->bus = bus;
2178 info->devfn = devfn;
2179 info->dev = dev;
2180 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002181 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002182 if (!dev)
2183 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2184
2185 spin_lock_irqsave(&device_domain_lock, flags);
2186 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002187 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002188 else {
2189 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002190 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002191 if (info2)
2192 found = info2->domain;
2193 }
Jiang Liu745f2582014-02-19 14:07:26 +08002194 if (found) {
2195 spin_unlock_irqrestore(&device_domain_lock, flags);
2196 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002197 /* Caller must free the original domain */
2198 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002199 }
2200
David Woodhouseb718cd32014-03-09 13:11:33 -07002201 list_add(&info->link, &domain->devices);
2202 list_add(&info->global, &device_domain_list);
2203 if (dev)
2204 dev->archdata.iommu = info;
2205 spin_unlock_irqrestore(&device_domain_lock, flags);
2206
2207 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002208}
2209
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002210/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002211static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002212{
Jiang Liue85bb5d2014-02-19 14:07:27 +08002213 struct dmar_domain *domain, *free = NULL;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002214 struct intel_iommu *iommu = NULL;
2215 struct device_domain_info *info;
David Woodhouse146922e2014-03-09 15:44:17 -07002216 struct pci_dev *dev_tmp = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002217 unsigned long flags;
David Woodhouse146922e2014-03-09 15:44:17 -07002218 u8 bus, devfn, bridge_bus, bridge_devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002219
David Woodhouse146922e2014-03-09 15:44:17 -07002220 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002221 if (domain)
2222 return domain;
2223
David Woodhouse146922e2014-03-09 15:44:17 -07002224 if (dev_is_pci(dev)) {
2225 struct pci_dev *pdev = to_pci_dev(dev);
2226 u16 segment;
David Woodhouse276dbf992009-04-04 01:45:37 +01002227
David Woodhouse146922e2014-03-09 15:44:17 -07002228 segment = pci_domain_nr(pdev->bus);
2229 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
2230 if (dev_tmp) {
2231 if (pci_is_pcie(dev_tmp)) {
2232 bridge_bus = dev_tmp->subordinate->number;
2233 bridge_devfn = 0;
2234 } else {
2235 bridge_bus = dev_tmp->bus->number;
2236 bridge_devfn = dev_tmp->devfn;
2237 }
2238 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse9f05d3f2014-04-14 22:01:30 -07002239 info = dmar_search_domain_by_dev_info(segment,
2240 bridge_bus,
2241 bridge_devfn);
David Woodhouse146922e2014-03-09 15:44:17 -07002242 if (info) {
2243 iommu = info->iommu;
2244 domain = info->domain;
2245 }
2246 spin_unlock_irqrestore(&device_domain_lock, flags);
 2247	/* pcie-pci bridge already has a domain, use it */
2248 if (info)
2249 goto found_domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002250 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002251 }
2252
David Woodhouse146922e2014-03-09 15:44:17 -07002253 iommu = device_to_iommu(dev, &bus, &devfn);
2254 if (!iommu)
2255 goto error;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002256
David Woodhouse146922e2014-03-09 15:44:17 -07002257	/* Allocate and initialize a new domain for the device */
Jiang Liu92d03cc2014-02-19 14:07:28 +08002258 domain = alloc_domain(false);
Jiang Liu745f2582014-02-19 14:07:26 +08002259 if (!domain)
2260 goto error;
2261 if (iommu_attach_domain(domain, iommu)) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002262 free_domain_mem(domain);
Dan Carpenter14d40562014-03-28 11:29:50 +03002263 domain = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002264 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002265 }
Jiang Liue85bb5d2014-02-19 14:07:27 +08002266 free = domain;
2267 if (domain_init(domain, gaw))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002268 goto error;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002269
2270 /* register pcie-to-pci device */
2271 if (dev_tmp) {
David Woodhouse146922e2014-03-09 15:44:17 -07002272 domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
2273 NULL, domain);
David Woodhouseb718cd32014-03-09 13:11:33 -07002274 if (!domain)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002275 goto error;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002276 }
2277
2278found_domain:
David Woodhouse146922e2014-03-09 15:44:17 -07002279 domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002280error:
David Woodhouseb718cd32014-03-09 13:11:33 -07002281 if (free != domain)
Jiang Liue85bb5d2014-02-19 14:07:27 +08002282 domain_exit(free);
David Woodhouseb718cd32014-03-09 13:11:33 -07002283
2284 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002285}
2286
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002287static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002288#define IDENTMAP_ALL 1
2289#define IDENTMAP_GFX 2
2290#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002291
David Woodhouseb2132032009-06-26 18:50:28 +01002292static int iommu_domain_identity_map(struct dmar_domain *domain,
2293 unsigned long long start,
2294 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002295{
David Woodhousec5395d52009-06-28 16:35:56 +01002296 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2297 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002298
David Woodhousec5395d52009-06-28 16:35:56 +01002299 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2300 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002301 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002302 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002303 }
2304
David Woodhousec5395d52009-06-28 16:35:56 +01002305 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2306 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002307 /*
 2308	 * The RMRR range might overlap with a physical memory range;
 2309	 * clear it first.
2310 */
David Woodhousec5395d52009-06-28 16:35:56 +01002311 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312
David Woodhousec5395d52009-06-28 16:35:56 +01002313 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2314 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002315 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002316}
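/*
 * Worked example (illustrative only): the ISA workaround further down
 * maps 0-16MiB 1:1.  In VT-d page frames that span is vpfn 0x0 through
 * 0xfff, i.e. 4096 pages:
 */
#if 0	/* example only, never built */
	ret = iommu_domain_identity_map(domain, 0, 16*1024*1024 - 1);
#endif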
2317
David Woodhouse0b9d9752014-03-09 15:48:15 -07002318static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002319 unsigned long long start,
2320 unsigned long long end)
2321{
2322 struct dmar_domain *domain;
2323 int ret;
2324
David Woodhouse0b9d9752014-03-09 15:48:15 -07002325 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002326 if (!domain)
2327 return -ENOMEM;
2328
David Woodhouse19943b02009-08-04 16:19:20 +01002329 /* For _hardware_ passthrough, don't bother. But for software
2330 passthrough, we do it anyway -- it may indicate a memory
 2331	 range which is reserved in E820 and so didn't get set
 2332	 up in si_domain to start with */
2333 if (domain == si_domain && hw_pass_through) {
 2334	printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002335 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002336 return 0;
2337 }
2338
2339 printk(KERN_INFO
2340 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002341 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002342
David Woodhouse5595b522009-12-02 09:21:55 +00002343 if (end < start) {
2344 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2345 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2346 dmi_get_system_info(DMI_BIOS_VENDOR),
2347 dmi_get_system_info(DMI_BIOS_VERSION),
2348 dmi_get_system_info(DMI_PRODUCT_VERSION));
2349 ret = -EIO;
2350 goto error;
2351 }
2352
David Woodhouse2ff729f2009-08-26 14:25:41 +01002353 if (end >> agaw_to_width(domain->agaw)) {
2354 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2355 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2356 agaw_to_width(domain->agaw),
2357 dmi_get_system_info(DMI_BIOS_VENDOR),
2358 dmi_get_system_info(DMI_BIOS_VERSION),
2359 dmi_get_system_info(DMI_PRODUCT_VERSION));
2360 ret = -EIO;
2361 goto error;
2362 }
David Woodhouse19943b02009-08-04 16:19:20 +01002363
David Woodhouseb2132032009-06-26 18:50:28 +01002364 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002365 if (ret)
2366 goto error;
2367
2368 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002369 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002370 if (ret)
2371 goto error;
2372
2373 return 0;
2374
2375 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002376 domain_exit(domain);
2377 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378}
2379
2380static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002381 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002382{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002383 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002384 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002385 return iommu_prepare_identity_map(dev, rmrr->base_address,
2386 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002387}
2388
Suresh Siddhad3f13812011-08-23 17:05:25 -07002389#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002390static inline void iommu_prepare_isa(void)
2391{
2392 struct pci_dev *pdev;
2393 int ret;
2394
2395 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2396 if (!pdev)
2397 return;
2398
David Woodhousec7ab48d2009-06-26 19:10:36 +01002399 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002400 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002401
2402 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002403 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2404 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002405
Yijing Wang9b27e822014-05-20 20:37:52 +08002406 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002407}
2408#else
2409static inline void iommu_prepare_isa(void)
2410{
2411 return;
2412}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002413#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002414
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002415static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002416
Matt Kraai071e1372009-08-23 22:30:22 -07002417static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002418{
2419 struct dmar_drhd_unit *drhd;
2420 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002421 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002422
Jiang Liu92d03cc2014-02-19 14:07:28 +08002423 si_domain = alloc_domain(false);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002424 if (!si_domain)
2425 return -EFAULT;
2426
Jiang Liu92d03cc2014-02-19 14:07:28 +08002427 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2428
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002429 for_each_active_iommu(iommu, drhd) {
2430 ret = iommu_attach_domain(si_domain, iommu);
2431 if (ret) {
2432 domain_exit(si_domain);
2433 return -EFAULT;
2434 }
2435 }
2436
2437 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2438 domain_exit(si_domain);
2439 return -EFAULT;
2440 }
2441
Jiang Liu9544c002014-01-06 14:18:13 +08002442 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2443 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002444
David Woodhouse19943b02009-08-04 16:19:20 +01002445 if (hw)
2446 return 0;
2447
David Woodhousec7ab48d2009-06-26 19:10:36 +01002448 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002449 unsigned long start_pfn, end_pfn;
2450 int i;
2451
2452 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2453 ret = iommu_domain_identity_map(si_domain,
2454 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2455 if (ret)
2456 return ret;
2457 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002458 }
2459
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002460 return 0;
2461}
2462
David Woodhouse9b226622014-03-09 14:03:28 -07002463static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002464{
2465 struct device_domain_info *info;
2466
2467 if (likely(!iommu_identity_mapping))
2468 return 0;
2469
David Woodhouse9b226622014-03-09 14:03:28 -07002470 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002471 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2472 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002473
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002474 return 0;
2475}
2476
2477static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002478 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002479{
David Woodhouse0ac72662014-03-09 13:19:22 -07002480 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002481 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002482 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002483 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002484
David Woodhouse5913c9b2014-03-09 16:27:31 -07002485 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002486 if (!iommu)
2487 return -ENODEV;
2488
David Woodhouse5913c9b2014-03-09 16:27:31 -07002489 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002490 if (ndomain != domain)
2491 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002492
David Woodhouse5913c9b2014-03-09 16:27:31 -07002493 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002494 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002495 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002496 return ret;
2497 }
2498
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002499 return 0;
2500}
2501
David Woodhouse0b9d9752014-03-09 15:48:15 -07002502static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002503{
2504 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002505 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002506 int i;
2507
Jiang Liu0e242612014-02-19 14:07:34 +08002508 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002509 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002510 /*
2511 * Return TRUE if this RMRR contains the device that
2512 * is passed in.
2513 */
2514 for_each_active_dev_scope(rmrr->devices,
2515 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002516 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002517 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002518 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002519 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002520 }
Jiang Liu0e242612014-02-19 14:07:34 +08002521 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002522 return false;
2523}
2524
David Woodhouse3bdb2592014-03-09 16:03:08 -07002525static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002526{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002527
David Woodhouse3bdb2592014-03-09 16:03:08 -07002528 if (dev_is_pci(dev)) {
2529 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002530
David Woodhouse3bdb2592014-03-09 16:03:08 -07002531 /*
2532 * We want to prevent any device associated with an RMRR from
2533 * getting placed into the SI Domain. This is done because
2534 * problems exist when devices are moved in and out of domains
2535 * and their respective RMRR info is lost. We exempt USB devices
2536 * from this process due to their usage of RMRRs that are known
2537 * to not be needed after BIOS hand-off to OS.
2538 */
2539 if (device_has_rmrr(dev) &&
2540 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2541 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002542
David Woodhouse3bdb2592014-03-09 16:03:08 -07002543 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2544 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002545
David Woodhouse3bdb2592014-03-09 16:03:08 -07002546 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2547 return 1;
2548
2549 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2550 return 0;
2551
2552 /*
2553 * We want to start off with all devices in the 1:1 domain, and
2554 * take them out later if we find they can't access all of memory.
2555 *
2556 * However, we can't do this for PCI devices behind bridges,
2557 * because all PCI devices behind the same bridge will end up
2558 * with the same source-id on their transactions.
2559 *
2560 * Practically speaking, we can't change things around for these
2561 * devices at run-time, because we can't be sure there'll be no
2562 * DMA transactions in flight for any of their siblings.
2563 *
2564 * So PCI devices (unless they're on the root bus) as well as
2565 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2566 * the 1:1 domain, just in _case_ one of their siblings turns out
2567 * not to be able to map all of memory.
2568 */
2569 if (!pci_is_pcie(pdev)) {
2570 if (!pci_is_root_bus(pdev->bus))
2571 return 0;
2572 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2573 return 0;
2574 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2575 return 0;
2576 } else {
2577 if (device_has_rmrr(dev))
2578 return 0;
2579 }
David Woodhouse6941af22009-07-04 18:24:27 +01002580
David Woodhouse3dfc8132009-07-04 19:11:08 +01002581 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002582 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002583 * Assume that they will; if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002584 * take them out of the 1:1 domain later.
2585 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002586 if (!startup) {
2587 /*
2588 * If the device's dma_mask is less than the system's memory
2589 * size then this is not a candidate for identity mapping.
2590 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002591 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002592
David Woodhouse3bdb2592014-03-09 16:03:08 -07002593 if (dev->coherent_dma_mask &&
2594 dev->coherent_dma_mask < dma_mask)
2595 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002596
David Woodhouse3bdb2592014-03-09 16:03:08 -07002597 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002598 }
David Woodhouse6941af22009-07-04 18:24:27 +01002599
2600 return 1;
2601}
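
/*
 * Simplified sketch of the !startup check above (it ignores the
 * coherent-mask clamp): a device may stay identity-mapped only if its
 * DMA mask covers every physical address in the system. With 16GB of
 * RAM, dma_get_required_mask() is roughly DMA_BIT_MASK(34), so a
 * 32-bit-only device fails the test and is moved to a private domain.
 */
static bool example_can_stay_identity_mapped(struct device *dev)
{
	return *dev->dma_mask >= dma_get_required_mask(dev);
}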
2602
David Woodhousecf04eee2014-03-21 16:49:04 +00002603static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2604{
2605 int ret;
2606
2607 if (!iommu_should_identity_map(dev, 1))
2608 return 0;
2609
2610 ret = domain_add_dev_info(si_domain, dev,
2611 hw ? CONTEXT_TT_PASS_THROUGH :
2612 CONTEXT_TT_MULTI_LEVEL);
2613 if (!ret)
2614 pr_info("IOMMU: %s identity mapping for device %s\n",
2615 hw ? "hardware" : "software", dev_name(dev));
2616 else if (ret == -ENODEV)
2617 /* device not associated with an iommu */
2618 ret = 0;
2619
2620 return ret;
2621}
2622
2623
Matt Kraai071e1372009-08-23 22:30:22 -07002624static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002625{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002626 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002627 struct dmar_drhd_unit *drhd;
2628 struct intel_iommu *iommu;
2629 struct device *dev;
2630 int i;
2631 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002632
David Woodhouse19943b02009-08-04 16:19:20 +01002633 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002634 if (ret)
2635 return -EFAULT;
2636
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002637 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002638 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2639 if (ret)
2640 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002641 }
2642
David Woodhousecf04eee2014-03-21 16:49:04 +00002643 for_each_active_iommu(iommu, drhd)
2644 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2645 struct acpi_device_physical_node *pn;
2646 struct acpi_device *adev;
2647
2648 if (dev->bus != &acpi_bus_type)
2649 continue;
2650
2651 adev = to_acpi_device(dev);
2652 mutex_lock(&adev->physical_node_lock);
2653 list_for_each_entry(pn, &adev->physical_node_list, node) {
2654 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2655 if (ret)
2656 break;
2657 }
2658 mutex_unlock(&adev->physical_node_lock);
2659 if (ret)
2660 return ret;
2661 }
2662
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002663 return 0;
2664}
2665
Joseph Cihulab7792602011-05-03 00:08:37 -07002666static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002667{
2668 struct dmar_drhd_unit *drhd;
2669 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002670 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002671 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002672 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002673
2674 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002675 * for each drhd
2676 * allocate root
2677 * initialize and program root entry to not present
2678 * endfor
2679 */
2680 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002681 /*
2682 * lock not needed as this is only incremented on the single-
2683 * threaded kernel __init code path; all other accesses are
2684 * read-only
2685 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002686 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2687 g_num_of_iommus++;
2688 continue;
2689 }
2690 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2691 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002692 }
2693
Weidong Hand9630fe2008-12-08 11:06:32 +08002694 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2695 GFP_KERNEL);
2696 if (!g_iommus) {
2697 printk(KERN_ERR "Allocating global iommu array failed\n");
2698 ret = -ENOMEM;
2699 goto error;
2700 }
2701
mark gross80b20dd2008-04-18 13:53:58 -07002702 deferred_flush = kzalloc(g_num_of_iommus *
2703 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2704 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002705 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002706 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002707 }
2708
Jiang Liu7c919772014-01-06 14:18:18 +08002709 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002710 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002711
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002712 ret = iommu_init_domains(iommu);
2713 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002714 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002715
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716 /*
2717 * TBD:
2718 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002719 * among all IOMMUs; need to split this later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720 */
2721 ret = iommu_alloc_root_entry(iommu);
2722 if (ret) {
2723 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002724 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002725 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002726 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002727 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002728 }
2729
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002730 /*
2731 * Start from the sane iommu hardware state.
2732 */
Jiang Liu7c919772014-01-06 14:18:18 +08002733 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002734 /*
2735 * If the queued invalidation is already initialized by us
2736 * (for example, while enabling interrupt-remapping) then
2737 * we got the things already rolling from a sane state.
2738 */
2739 if (iommu->qi)
2740 continue;
2741
2742 /*
2743 * Clear any previous faults.
2744 */
2745 dmar_fault(-1, iommu);
2746 /*
2747 * Disable queued invalidation if supported and already enabled
2748 * before OS handover.
2749 */
2750 dmar_disable_qi(iommu);
2751 }
2752
Jiang Liu7c919772014-01-06 14:18:18 +08002753 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002754 if (dmar_enable_qi(iommu)) {
2755 /*
2756 * Queued Invalidate not enabled, use Register Based
2757 * Invalidate
2758 */
2759 iommu->flush.flush_context = __iommu_flush_context;
2760 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002761 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002762 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002763 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002764 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002765 } else {
2766 iommu->flush.flush_context = qi_flush_context;
2767 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002768 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002769 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002770 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002771 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002772 }
2773 }
2774
David Woodhouse19943b02009-08-04 16:19:20 +01002775 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002776 iommu_identity_mapping |= IDENTMAP_ALL;
2777
Suresh Siddhad3f13812011-08-23 17:05:25 -07002778#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002779 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002780#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002781
2782 check_tylersburg_isoch();
2783
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002784 /*
2785 * If pass-through is not set or not enabled, set up context entries
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002786 * for identity mappings for RMRR, GFX and ISA, and fall back to static
2787 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002788 */
David Woodhouse19943b02009-08-04 16:19:20 +01002789 if (iommu_identity_mapping) {
2790 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2791 if (ret) {
2792 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002793 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002794 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002795 }
David Woodhouse19943b02009-08-04 16:19:20 +01002796 /*
2797 * For each rmrr
2798 * for each dev attached to rmrr
2799 * do
2800 * locate drhd for dev, alloc domain for dev
2801 * allocate free domain
2802 * allocate page table entries for rmrr
2803 * if context not allocated for bus
2804 * allocate and init context
2805 * set present in root table for this bus
2806 * init context with domain, translation etc
2807 * endfor
2808 * endfor
2809 */
2810 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2811 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002812 /* some BIOSes list non-existent devices in the DMAR table. */
2813 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002814 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002815 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002816 if (ret)
2817 printk(KERN_ERR
2818 "IOMMU: mapping reserved region failed\n");
2819 }
2820 }
2821
2822 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002823
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002824 /*
2825 * for each drhd
2826 * enable fault log
2827 * global invalidate context cache
2828 * global invalidate iotlb
2829 * enable translation
2830 */
Jiang Liu7c919772014-01-06 14:18:18 +08002831 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002832 if (drhd->ignored) {
2833 /*
2834 * we always have to disable PMRs or DMA may fail on
2835 * this device
2836 */
2837 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002838 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002839 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002840 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002841
2842 iommu_flush_write_buffer(iommu);
2843
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002844 ret = dmar_set_interrupt(iommu);
2845 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002846 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002847
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002848 iommu_set_root_entry(iommu);
2849
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002850 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002851 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002852
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002853 ret = iommu_enable_translation(iommu);
2854 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002855 goto free_iommu;
David Woodhouseb94996c2009-09-19 15:28:12 -07002856
2857 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002858 }
2859
2860 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002861
2862free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002863 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002864 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002865 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002866free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002867 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002868error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002869 return ret;
2870}
2871
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002872/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002873static struct iova *intel_alloc_iova(struct device *dev,
2874 struct dmar_domain *domain,
2875 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002876{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002877 struct iova *iova = NULL;
2878
David Woodhouse875764d2009-06-28 21:20:51 +01002879 /* Restrict dma_mask to the width that the iommu can handle */
2880 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2881
2882 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002883 /*
2884 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002885 * DMA_BIT_MASK(32); if that fails, fall back to allocating
Joe Perches36098012007-12-17 11:40:11 -08002886 * from the higher range.
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002887 */
David Woodhouse875764d2009-06-28 21:20:51 +01002888 iova = alloc_iova(&domain->iovad, nrpages,
2889 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2890 if (iova)
2891 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002892 }
David Woodhouse875764d2009-06-28 21:20:51 +01002893 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2894 if (unlikely(!iova)) {
2895 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002896 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002897 return NULL;
2898 }
2899
2900 return iova;
2901}
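
/*
 * Usage sketch mirroring the callers below: a byte-granular request is
 * rounded up to VT-d pages with aligned_nrpages() and then converted to
 * MM pages for the allocator. 'paddr' and 'size' are hypothetical values
 * taken from a mapping request.
 */
static struct iova *example_iova_for_request(struct device *dev,
					     struct dmar_domain *domain,
					     phys_addr_t paddr, size_t size)
{
	unsigned long nrpages = aligned_nrpages(paddr, size);

	return intel_alloc_iova(dev, domain, dma_to_mm_pfn(nrpages),
				*dev->dma_mask);
}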
2902
David Woodhoused4b709f2014-03-09 16:07:40 -07002903static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002904{
2905 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002906 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002907
David Woodhoused4b709f2014-03-09 16:07:40 -07002908 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002909 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002910 printk(KERN_ERR "Allocating domain for %s failed\n",
2911 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002912 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002913 }
2914
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002915 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002916 if (unlikely(!domain_context_mapped(dev))) {
2917 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002918 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002919 printk(KERN_ERR "Domain context map for %s failed\n",
2920 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002921 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002922 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 }
2924
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002925 return domain;
2926}
2927
David Woodhoused4b709f2014-03-09 16:07:40 -07002928static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002929{
2930 struct device_domain_info *info;
2931
2932 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002933 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002934 if (likely(info))
2935 return info->domain;
2936
2937 return __get_valid_domain_for_dev(dev);
2938}
2939
David Woodhouse3d891942014-03-06 15:59:26 +00002940static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002941{
David Woodhouse3d891942014-03-06 15:59:26 +00002942 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002943}
2944
David Woodhouseecb509e2014-03-09 16:29:55 -07002945 /* Check if the device needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002946static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002947{
2948 int found;
2949
David Woodhouse3d891942014-03-06 15:59:26 +00002950 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002951 return 1;
2952
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002953 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002954 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002955
David Woodhouse9b226622014-03-09 14:03:28 -07002956 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002957 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002958 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002959 return 1;
2960 else {
2961 /*
2962 * A 32-bit DMA device is removed from si_domain and falls
2963 * back to non-identity mapping.
2964 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002965 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002966 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002967 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002968 return 0;
2969 }
2970 } else {
2971 /*
2972 * When a 64-bit DMA capable device is detached from a VM,
2973 * it is put back into si_domain for identity mapping.
2974 */
David Woodhouseecb509e2014-03-09 16:29:55 -07002975 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002976 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07002977 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002978 hw_pass_through ?
2979 CONTEXT_TT_PASS_THROUGH :
2980 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002981 if (!ret) {
2982 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002983 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002984 return 1;
2985 }
2986 }
2987 }
2988
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002989 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002990}
2991
David Woodhouse5040a912014-03-09 16:14:00 -07002992static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002993 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002994{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002995 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002996 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002997 struct iova *iova;
2998 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002999 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003000 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003001 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003002
3003 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003004
David Woodhouse5040a912014-03-09 16:14:00 -07003005 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003006 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003007
David Woodhouse5040a912014-03-09 16:14:00 -07003008 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003009 if (!domain)
3010 return 0;
3011
Weidong Han8c11e792008-12-08 15:29:22 +08003012 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003013 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003014
David Woodhouse5040a912014-03-09 16:14:00 -07003015 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003016 if (!iova)
3017 goto error;
3018
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003019 /*
3020 * Check if DMAR supports zero-length reads on write only
3021 * mappings..
3022 */
3023 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003024 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003025 prot |= DMA_PTE_READ;
3026 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3027 prot |= DMA_PTE_WRITE;
3028 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003029 * paddr..(paddr + size) may cover partial pages; map whole pages.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003030 * Note: if two parts of one page are mapped separately, we might
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003031 * end up with two guest addresses mapping to the same host paddr,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003032 * but this is not a big problem.
3033 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003034 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003035 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003036 if (ret)
3037 goto error;
3038
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003039 /* it's a non-present to present mapping. Only flush if caching mode */
3040 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003041 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003042 else
Weidong Han8c11e792008-12-08 15:29:22 +08003043 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003044
David Woodhouse03d6a242009-06-28 15:33:46 +01003045 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3046 start_paddr += paddr & ~PAGE_MASK;
3047 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003048
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003049error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003050 if (iova)
3051 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003052 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003053 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054 return 0;
3055}
3056
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003057static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3058 unsigned long offset, size_t size,
3059 enum dma_data_direction dir,
3060 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003061{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003062 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003063 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003064}
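
/*
 * Driver-side usage sketch (generic DMA API, not specific to this file):
 * once intel_dma_ops is installed, dma_map_page() lands in
 * intel_map_page() above. 'dev', 'page' and 'len' are hypothetical.
 */
static int example_single_mapping(struct device *dev, struct page *page,
				  size_t len)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... program the device with 'handle' and let it DMA ... */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}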
3065
mark gross5e0d2a62008-03-04 15:22:08 -08003066static void flush_unmaps(void)
3067{
mark gross80b20dd2008-04-18 13:53:58 -07003068 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003069
mark gross5e0d2a62008-03-04 15:22:08 -08003070 timer_on = 0;
3071
3072 /* just flush them all */
3073 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003074 struct intel_iommu *iommu = g_iommus[i];
3075 if (!iommu)
3076 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003077
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003078 if (!deferred_flush[i].next)
3079 continue;
3080
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003081 /* In caching mode, global flushes turn emulation expensive */
3082 if (!cap_caching_mode(iommu->cap))
3083 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003084 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003085 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003086 unsigned long mask;
3087 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003088 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003089
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003090 /* On real hardware multiple invalidations are expensive */
3091 if (cap_caching_mode(iommu->cap))
3092 iommu_flush_iotlb_psi(iommu, domain->id,
David Woodhouseea8ea462014-03-05 17:09:32 +00003093 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3094 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003095 else {
3096 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3097 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3098 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3099 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003100 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003101 if (deferred_flush[i].freelist[j])
3102 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003103 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003104 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003105 }
3106
mark gross5e0d2a62008-03-04 15:22:08 -08003107 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003108}
3109
3110static void flush_unmaps_timeout(unsigned long data)
3111{
mark gross80b20dd2008-04-18 13:53:58 -07003112 unsigned long flags;
3113
3114 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003115 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003116 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003117}
3118
David Woodhouseea8ea462014-03-05 17:09:32 +00003119static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003120{
3121 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003122 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003123 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003124
3125 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003126 if (list_size == HIGH_WATER_MARK)
3127 flush_unmaps();
3128
Weidong Han8c11e792008-12-08 15:29:22 +08003129 iommu = domain_get_iommu(dom);
3130 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003131
mark gross80b20dd2008-04-18 13:53:58 -07003132 next = deferred_flush[iommu_id].next;
3133 deferred_flush[iommu_id].domain[next] = dom;
3134 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003135 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003136 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003137
3138 if (!timer_on) {
3139 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3140 timer_on = 1;
3141 }
3142 list_size++;
3143 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3144}
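
/*
 * Illustrative timeline (hypothetical, assuming the 10ms timer above):
 * the deferred path trades unmap latency for fewer IOTLB flushes.
 *
 *	t=0ms	add_unmap()		-> timer armed
 *	t=3ms	add_unmap()		-> entry batched, timer untouched
 *	t=10ms	flush_unmaps_timeout()	-> one flush releases both IOVAs
 *
 * Booting with intel_iommu=strict bypasses this batching; see the
 * intel_iommu_strict tests in the unmap paths below.
 */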
3145
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003146static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3147 size_t size, enum dma_data_direction dir,
3148 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003149{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003150 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003151 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003152 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003153 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003154 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003155
David Woodhouse73676832009-07-04 14:08:36 +01003156 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003157 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003158
David Woodhouse1525a292014-03-06 16:19:30 +00003159 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003160 BUG_ON(!domain);
3161
Weidong Han8c11e792008-12-08 15:29:22 +08003162 iommu = domain_get_iommu(domain);
3163
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003164 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003165 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3166 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003167 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003168
David Woodhoused794dc92009-06-28 00:27:49 +01003169 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3170 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003171
David Woodhoused794dc92009-06-28 00:27:49 +01003172 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003173 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003174
David Woodhouseea8ea462014-03-05 17:09:32 +00003175 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003176
mark gross5e0d2a62008-03-04 15:22:08 -08003177 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003178 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003179 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003180 /* free iova */
3181 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003182 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003183 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003184 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003185 /*
3186 * queue up the release of the unmap to save the roughly 1/6th of
3187 * the CPU time otherwise spent on the IOTLB flush operation...
3188 */
mark gross5e0d2a62008-03-04 15:22:08 -08003189 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190}
3191
David Woodhouse5040a912014-03-09 16:14:00 -07003192static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003193 dma_addr_t *dma_handle, gfp_t flags,
3194 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003195{
Akinobu Mita36746432014-06-04 16:06:51 -07003196 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003197 int order;
3198
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003199 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003200 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003201
David Woodhouse5040a912014-03-09 16:14:00 -07003202 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003203 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003204 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3205 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003206 flags |= GFP_DMA;
3207 else
3208 flags |= GFP_DMA32;
3209 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003210
Akinobu Mita36746432014-06-04 16:06:51 -07003211 if (flags & __GFP_WAIT) {
3212 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003213
Akinobu Mita36746432014-06-04 16:06:51 -07003214 page = dma_alloc_from_contiguous(dev, count, order);
3215 if (page && iommu_no_mapping(dev) &&
3216 page_to_phys(page) + size > dev->coherent_dma_mask) {
3217 dma_release_from_contiguous(dev, page, count);
3218 page = NULL;
3219 }
3220 }
3221
3222 if (!page)
3223 page = alloc_pages(flags, order);
3224 if (!page)
3225 return NULL;
3226 memset(page_address(page), 0, size);
3227
3228 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003229 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003230 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003232 return page_address(page);
3233 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3234 __free_pages(page, order);
3235
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003236 return NULL;
3237}
3238
David Woodhouse5040a912014-03-09 16:14:00 -07003239static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003240 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003241{
3242 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003243 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003245 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003246 order = get_order(size);
3247
David Woodhouse5040a912014-03-09 16:14:00 -07003248 intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Akinobu Mita36746432014-06-04 16:06:51 -07003249 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3250 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003251}
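
/*
 * Driver-side usage sketch (generic DMA API): the two entry points above
 * are reached via dma_alloc_coherent()/dma_free_coherent(). The size and
 * the 'dev' pointer are hypothetical.
 */
static int example_coherent_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle,
					    GFP_KERNEL);

	if (!cpu_addr)
		return -ENOMEM;
	/* ... the CPU uses 'cpu_addr', the device is given 'handle' ... */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
	return 0;
}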
3252
David Woodhouse5040a912014-03-09 16:14:00 -07003253static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003254 int nelems, enum dma_data_direction dir,
3255 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003256{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003257 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003258 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003259 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003260 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003261 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003262
David Woodhouse5040a912014-03-09 16:14:00 -07003263 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003264 return;
3265
David Woodhouse5040a912014-03-09 16:14:00 -07003266 domain = find_domain(dev);
Weidong Han8c11e792008-12-08 15:29:22 +08003267 BUG_ON(!domain);
3268
3269 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003270
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003271 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003272 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3273 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003274 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003275
David Woodhoused794dc92009-06-28 00:27:49 +01003276 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3277 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003278
David Woodhouseea8ea462014-03-05 17:09:32 +00003279 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003280
David Woodhouseacea0012009-07-14 01:55:11 +01003281 if (intel_iommu_strict) {
3282 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003283 last_pfn - start_pfn + 1, !freelist, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003284 /* free iova */
3285 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003286 dma_free_pagelist(freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003287 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003288 add_unmap(domain, iova, freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003289 /*
3290 * queue up the release of the unmap to save the roughly 1/6th of
3291 * the CPU time otherwise spent on the IOTLB flush operation...
3292 */
3293 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003294}
3295
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003297 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298{
3299 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003300 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003301
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003302 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003303 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003304 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003305 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306 }
3307 return nelems;
3308}
3309
David Woodhouse5040a912014-03-09 16:14:00 -07003310static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003311 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003312{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003315 size_t size = 0;
3316 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003317 struct iova *iova = NULL;
3318 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003319 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003320 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003321 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003322
3323 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003324 if (iommu_no_mapping(dev))
3325 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003326
David Woodhouse5040a912014-03-09 16:14:00 -07003327 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 if (!domain)
3329 return 0;
3330
Weidong Han8c11e792008-12-08 15:29:22 +08003331 iommu = domain_get_iommu(domain);
3332
David Woodhouseb536d242009-06-28 14:49:31 +01003333 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003334 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003335
David Woodhouse5040a912014-03-09 16:14:00 -07003336 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3337 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003338 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003339 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003340 return 0;
3341 }
3342
3343 /*
3344 * Check if DMAR supports zero-length reads on write only
3345 * mappings..
3346 */
3347 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003348 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003349 prot |= DMA_PTE_READ;
3350 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3351 prot |= DMA_PTE_WRITE;
3352
David Woodhouseb536d242009-06-28 14:49:31 +01003353 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003354
Fenghua Yuf5329592009-08-04 15:09:37 -07003355 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003356 if (unlikely(ret)) {
3357 /* clear the page */
3358 dma_pte_clear_range(domain, start_vpfn,
3359 start_vpfn + size - 1);
3360 /* free page tables */
3361 dma_pte_free_pagetable(domain, start_vpfn,
3362 start_vpfn + size - 1);
3363 /* free iova */
3364 __free_iova(&domain->iovad, iova);
3365 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003366 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003367
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003368 /* it's a non-present to present mapping. Only flush if caching mode */
3369 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003370 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003371 else
Weidong Han8c11e792008-12-08 15:29:22 +08003372 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003373
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003374 return nelems;
3375}
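
/*
 * Driver-side usage sketch (generic DMA API): intel_map_sg() above is
 * reached via dma_map_sg(). 'sglist' and 'nelems' are hypothetical; the
 * returned segment count may be smaller than 'nelems'.
 */
static int example_sg_mapping(struct device *dev, struct scatterlist *sglist,
			      int nelems)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sglist, nelems, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;
	for_each_sg(sglist, sg, mapped, i)
		pr_debug("seg %d: 0x%llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
	dma_unmap_sg(dev, sglist, nelems, DMA_FROM_DEVICE);
	return 0;
}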
3376
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003377static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3378{
3379 return !dma_addr;
3380}
3381
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003382struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003383 .alloc = intel_alloc_coherent,
3384 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003385 .map_sg = intel_map_sg,
3386 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003387 .map_page = intel_map_page,
3388 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003389 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003390};
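
/*
 * Note (assumption based on the rest of this file): intel_iommu_init()
 * installs this table as the global 'dma_ops', after which the generic
 * dma_map_*() helpers used in the sketches above resolve to the intel_*
 * callbacks here.
 */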
3391
3392static inline int iommu_domain_cache_init(void)
3393{
3394 int ret = 0;
3395
3396 iommu_domain_cache = kmem_cache_create("iommu_domain",
3397 sizeof(struct dmar_domain),
3398 0,
3399 SLAB_HWCACHE_ALIGN,
3400 NULL);
3402 if (!iommu_domain_cache) {
3403 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3404 ret = -ENOMEM;
3405 }
3406
3407 return ret;
3408}
3409
3410static inline int iommu_devinfo_cache_init(void)
3411{
3412 int ret = 0;
3413
3414 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3415 sizeof(struct device_domain_info),
3416 0,
3417 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003418 NULL);
3419 if (!iommu_devinfo_cache) {
3420 printk(KERN_ERR "Couldn't create devinfo cache\n");
3421 ret = -ENOMEM;
3422 }
3423
3424 return ret;
3425}
3426
3427static inline int iommu_iova_cache_init(void)
3428{
3429 int ret = 0;
3430
3431 iommu_iova_cache = kmem_cache_create("iommu_iova",
3432 sizeof(struct iova),
3433 0,
3434 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003435 NULL);
3436 if (!iommu_iova_cache) {
3437 printk(KERN_ERR "Couldn't create iova cache\n");
3438 ret = -ENOMEM;
3439 }
3440
3441 return ret;
3442}
3443
3444static int __init iommu_init_mempool(void)
3445{
3446 int ret;
3447 ret = iommu_iova_cache_init();
3448 if (ret)
3449 return ret;
3450
3451 ret = iommu_domain_cache_init();
3452 if (ret)
3453 goto domain_error;
3454
3455 ret = iommu_devinfo_cache_init();
3456 if (!ret)
3457 return ret;
3458
3459 kmem_cache_destroy(iommu_domain_cache);
3460domain_error:
3461 kmem_cache_destroy(iommu_iova_cache);
3462
3463 return -ENOMEM;
3464}
3465
3466static void __init iommu_exit_mempool(void)
3467{
3468 kmem_cache_destroy(iommu_devinfo_cache);
3469 kmem_cache_destroy(iommu_domain_cache);
3470 kmem_cache_destroy(iommu_iova_cache);
3471
3472}
3473
Dan Williams556ab452010-07-23 15:47:56 -07003474static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3475{
3476 struct dmar_drhd_unit *drhd;
3477 u32 vtbar;
3478 int rc;
3479
3480 /* We know that this device on this chipset has its own IOMMU.
3481 * If we find it under a different IOMMU, then the BIOS is lying
3482 * to us. Hope that the IOMMU for this device is actually
3483 * disabled, and it needs no translation...
3484 */
3485 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3486 if (rc) {
3487 /* "can't" happen */
3488 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3489 return;
3490 }
3491 vtbar &= 0xffff0000;
3492
3493 /* we know that this IOMMU should be at offset 0xa000 from vtbar */
3494 drhd = dmar_find_matched_drhd_unit(pdev);
3495 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3496 TAINT_FIRMWARE_WORKAROUND,
3497 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3498 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3499}
3500DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3501
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003502static void __init init_no_remapping_devices(void)
3503{
3504 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003505 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003506 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003507
3508 for_each_drhd_unit(drhd) {
3509 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003510 for_each_active_dev_scope(drhd->devices,
3511 drhd->devices_cnt, i, dev)
3512 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003513 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514 if (i == drhd->devices_cnt)
3515 drhd->ignored = 1;
3516 }
3517 }
3518
Jiang Liu7c919772014-01-06 14:18:18 +08003519 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003520 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003521 continue;
3522
Jiang Liub683b232014-02-19 14:07:32 +08003523 for_each_active_dev_scope(drhd->devices,
3524 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003525 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003526 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003527 if (i < drhd->devices_cnt)
3528 continue;
3529
David Woodhousec0771df2011-10-14 20:59:46 +01003530 /* This IOMMU has *only* gfx devices. Either bypass it or
3531 * set the gfx_mapped flag, as appropriate. */
3532 if (dmar_map_gfx) {
3533 intel_iommu_gfx_mapped = 1;
3534 } else {
3535 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003536 for_each_active_dev_scope(drhd->devices,
3537 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003538 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003539 }
3540 }
3541}
3542
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003543#ifdef CONFIG_SUSPEND
3544static int init_iommu_hw(void)
3545{
3546 struct dmar_drhd_unit *drhd;
3547 struct intel_iommu *iommu = NULL;
3548
3549 for_each_active_iommu(iommu, drhd)
3550 if (iommu->qi)
3551 dmar_reenable_qi(iommu);
3552
Joseph Cihulab7792602011-05-03 00:08:37 -07003553 for_each_iommu(iommu, drhd) {
3554 if (drhd->ignored) {
3555 /*
3556 * we always have to disable PMRs or DMA may fail on
3557 * this device
3558 */
3559 if (force_on)
3560 iommu_disable_protect_mem_regions(iommu);
3561 continue;
3562 }
3563
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003564 iommu_flush_write_buffer(iommu);
3565
3566 iommu_set_root_entry(iommu);
3567
3568 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003569 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003570 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003571 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003572 if (iommu_enable_translation(iommu))
3573 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003574 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003575 }
3576
3577 return 0;
3578}
3579
3580static void iommu_flush_all(void)
3581{
3582 struct dmar_drhd_unit *drhd;
3583 struct intel_iommu *iommu;
3584
3585 for_each_active_iommu(iommu, drhd) {
3586 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003587 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003588 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003589 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003590 }
3591}
3592
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003593static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003594{
3595 struct dmar_drhd_unit *drhd;
3596 struct intel_iommu *iommu = NULL;
3597 unsigned long flag;
3598
3599 for_each_active_iommu(iommu, drhd) {
3600 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3601 GFP_ATOMIC);
3602 if (!iommu->iommu_state)
3603 goto nomem;
3604 }
3605
3606 iommu_flush_all();
3607
3608 for_each_active_iommu(iommu, drhd) {
3609 iommu_disable_translation(iommu);
3610
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003611 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003612
3613 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3614 readl(iommu->reg + DMAR_FECTL_REG);
3615 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3616 readl(iommu->reg + DMAR_FEDATA_REG);
3617 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3618 readl(iommu->reg + DMAR_FEADDR_REG);
3619 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3620 readl(iommu->reg + DMAR_FEUADDR_REG);
3621
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003622 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003623 }
3624 return 0;
3625
3626nomem:
3627 for_each_active_iommu(iommu, drhd)
3628 kfree(iommu->iommu_state);
3629
3630 return -ENOMEM;
3631}
3632
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003633static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003634{
3635 struct dmar_drhd_unit *drhd;
3636 struct intel_iommu *iommu = NULL;
3637 unsigned long flag;
3638
3639 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003640 if (force_on)
3641 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3642 else
3643 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003644 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003645 }
3646
3647 for_each_active_iommu(iommu, drhd) {
3648
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003649 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003650
3651 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3652 iommu->reg + DMAR_FECTL_REG);
3653 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3654 iommu->reg + DMAR_FEDATA_REG);
3655 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3656 iommu->reg + DMAR_FEADDR_REG);
3657 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3658 iommu->reg + DMAR_FEUADDR_REG);
3659
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003660 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003661 }
3662
3663 for_each_active_iommu(iommu, drhd)
3664 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003665}
3666
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003667static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003668 .resume = iommu_resume,
3669 .suspend = iommu_suspend,
3670};
3671
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003672static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003673{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003674 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003675}
3676
3677#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003678static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003679#endif /* CONFIG_PM */
3680
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003681
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				&rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

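/*
 * Parse one ACPI ATSR (Address Translation Services Reporting) structure.
 * An ATSR lists the root ports below which devices may use PCIe ATS; an
 * INCLUDE_ALL entry (bit 0 of the flags) covers every root port in the
 * segment, in which case no device scope is recorded.
 */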
int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

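/*
 * Decide whether @dev may use ATS: walk up from the device until we find
 * its PCIe root port, then check that root port against every parsed
 * ATSR's device scope (or any INCLUDE_ALL entry) under RCU.  Returns 1
 * if an ATSR covers the device, 0 if not (including the non-PCIe and
 * PCIe-to-PCI-bridge cases, where ATS cannot be used).
 */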
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

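/*
 * PCI hot-plug notifier: keep the cached RMRR and ATSR device-scope
 * arrays in sync as devices come and go.  On BUS_NOTIFY_ADD_DEVICE the
 * new device is matched against each structure's scope and inserted; on
 * BUS_NOTIFY_DEL_DEVICE it is removed again.
 */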
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * We only act here when a device is unbound from its driver or removed.
 *
 * A newly added device is not attached to its DMAR domain at this point;
 * that happens lazily, when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
	    action != BUS_NOTIFY_DEL_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
	    list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

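/*
 * Memory hot-plug notifier for the static identity (si) domain.  Memory
 * going online must be added to the identity map before any device can
 * DMA to it; memory going offline has its iova range split out, unmapped,
 * and flushed from the IOTLB of every active IOMMU.
 */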
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo,
					iova->pfn_hi - iova->pfn_lo + 1,
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

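/*
 * Driver entry point.  Rough sequence: parse the DMAR table, quiesce any
 * translation the firmware left enabled, build the device scope, set up
 * domains and page tables (init_dmars()), then install intel_dma_ops and
 * the bus/memory notifiers.  Under tboot (force_on) any failure is
 * fatal, since a measured launch requires VT-d.
 */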
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

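/*
 * When a device sits behind a PCIe-to-PCI(-X) bridge, its requests reach
 * the IOMMU with the bridge's (or the secondary bus's) source-id, so
 * context entries were installed for every hop on the way up.  Tear those
 * dependent context mappings down again when the device leaves the domain.
 */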
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	struct pci_dev *tmp, *parent, *pdev;

	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pdev = to_pci_dev(dev);

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

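/*
 * Remove one device from @domain: unlink and free its
 * device_domain_info, detach its context entries (and those of any
 * dependent bridges), and, if it was the last device on its IOMMU, drop
 * that IOMMU from the domain's bitmap and recompute the domain's
 * capabilities.
 */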
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu from iommu_bmp and
		 * update the iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
			spin_lock_irqsave(&iommu->lock, tmp_flags);
			clear_bit(domain->id, iommu->domain_ids);
			iommu->domains[domain->id] = NULL;
			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
		}
	}
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(true);
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

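/*
 * Attach a device to an iommu_domain (e.g. for VFIO/KVM device
 * assignment).  If the device is already mapped somewhere, detach it
 * from its old domain first.  Then clamp the domain's address width to
 * what this IOMMU supports, discarding unused top page-table levels:
 * for instance, a domain built with 4-level (48-bit) tables is trimmed
 * to 3 levels behind an IOMMU that only handles 39 bits.
 */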
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
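	/* e.g. with illustrative values hpa = 0x1ffc and size = 0x8, the
	   eight bytes straddle a 4KiB boundary, so aligned_nrpages()
	   yields two pages rather than one. */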
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}

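/*
 * ACS capabilities a device (or path) must expose before we treat it as
 * isolated: source validation, request redirect, completion redirect and
 * upstream forwarding.  Anything that cannot guarantee these is placed
 * in the same iommu_group as its potential peer-to-peer partners.
 */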
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

static int intel_iommu_add_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *bridge, *dma_pdev = NULL;
	struct iommu_group *group;
	int ret;
	u8 bus, devfn;

	if (!device_to_iommu(dev, &bus, &devfn))
		return -ENODEV;

	bridge = pci_find_upstream_pcie_bridge(pdev);
	if (bridge) {
		if (pci_is_pcie(bridge))
			dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
		if (!dma_pdev)
			dma_pdev = pci_dev_get(bridge);
	} else
		dma_pdev = pci_dev_get(pdev);

	/* Account for quirked devices */
	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

	/*
	 * If it's a multifunction device that does not support our
	 * required ACS flags, add to the same group as the lowest numbered
	 * function that also does not support the required ACS flags.
	 */
	if (dma_pdev->multifunction &&
	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
		u8 i, slot = PCI_SLOT(dma_pdev->devfn);

		for (i = 0; i < 8; i++) {
			struct pci_dev *tmp;

			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
			if (!tmp)
				continue;

			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
				swap_pci_ref(&dma_pdev, tmp);
				break;
			}
			pci_dev_put(tmp);
		}
	}

	/*
	 * Devices on the root bus go through the iommu.  If that's not us,
	 * find the next upstream device and test ACS up to the root bus.
	 * Finding the next device may require skipping virtual buses.
	 */
	while (!pci_is_root_bus(dma_pdev->bus)) {
		struct pci_bus *bus = dma_pdev->bus;

		while (!bus->self) {
			if (!pci_is_root_bus(bus))
				bus = bus->parent;
			else
				goto root_bus;
		}

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
	}

root_bus:
	group = iommu_group_get(&dma_pdev->dev);
	pci_dev_put(dma_pdev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void intel_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

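/* GGC is the GMCH graphics control register; bits 8-11 encode how much
   memory the BIOS set aside for the GTT and whether a VT-enabled
   (shadow GTT) variant was selected. */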
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}