/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of a 4KiB page
 * and that the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
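/*
 * As a quick illustration of the value above: ~0xFFFUL clears bits 0-11 and
 * sets every higher bit, so the bitmap advertises 4KiB, 8KiB, 16KiB, ... -
 * that is, every power-of-two multiple of the 4KiB VT-d page size.
 */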

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
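/*
 * Worked example for the helpers above: with LEVEL_STRIDE == 9, agaw 1
 * corresponds to a 39-bit address width and agaw 2 to 48 bits
 * (30 + 9 * agaw); width_to_agaw(48) gives 2 back, and agaw_to_level(2)
 * yields a 4-level page table.
 */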

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
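/*
 * Example of the level arithmetic above: level 1 entries map single 4KiB
 * pages (level_size(1) == 1 pfn), level 2 entries cover 512 pfns (2MiB) and
 * level 3 entries cover 512 * 512 pfns (1GiB), matching the superpage sizes
 * listed for iommu_superpage in struct dmar_domain below.
 */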

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
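/*
 * VTD_PAGE_SHIFT is 12, and on x86 PAGE_SHIFT is 12 as well, so the
 * mm <-> dma pfn conversions above are no-ops there; they only shift when
 * the kernel page size is larger than the 4KiB VT-d page size.
 */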

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
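/*
 * Lookup path implied by the helpers above: the root table is indexed by
 * PCI bus number, and each present root entry points to a context table
 * indexed by devfn; this is how device_to_context_entry() below resolves a
 * (bus, devfn) pair.
 */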

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
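/*
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512
 * eight-byte entries, so first_pte_in_page() is true exactly when a pte
 * pointer has wrapped onto a fresh table page; the walk loops below use
 * !first_pte_in_page(++pte) to stop at the end of the current table page.
 */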

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* The domain represents a virtual machine; more than one device across
 * iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

/* Convert a generic struct iommu_domain to the private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
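/*
 * Example: booting with "intel_iommu=on,strict,sp_off" enables the IOMMU,
 * disables batched IOTLB flushing and turns off superpage support; the
 * options are comma-separated and parsed by the loop above.
 */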

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
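/*
 * Example: a domain with agaw 2 has a 48-bit guest address width, so
 * addr_width above is 48 - 12 == 36 and only pfns below 1UL << 36 are
 * accepted; once addr_width reaches BITS_PER_LONG the check always passes.
 */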

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
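/*
 * Note on the walk above: missing intermediate tables are allocated and
 * installed with cmpxchg64(), so two CPUs racing to map the same range let
 * one installation win while the loser frees its page. With *target_level
 * == 0 the walk stops at the first superpage or non-present entry and
 * reports the level it reached; a non-zero *target_level stops at exactly
 * that level so callers can install superpage entries.
 */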


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
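/*
 * Typical use of the two functions above, per the comment before
 * domain_unmap(): collect the page-table pages into a freelist with
 * domain_unmap(), flush the IOTLB so the hardware can no longer walk them,
 * and only then release the pages with dma_free_pagelist().
 */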

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* the return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* the return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* a global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra secure. It looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
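/*
 * For a page-selective (PSI) flush above, the low bits of val_iva carry the
 * address mask, i.e. the request covers 2^size_order pages starting at addr;
 * size_order 9 with 4KiB pages therefore invalidates a naturally aligned
 * 2MiB range. DSI and global flushes leave the IVA register unused.
 */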
1274
static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

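/*
 * Issue a device-IOTLB (ATS) invalidation for every ATS-enabled PCI device
 * attached to the domain, covering @addr with the given @mask order, using
 * each device's source-id and ATS invalidation queue depth.
 */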
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

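/*
 * Page-selective IOTLB invalidation for @pages pages starting at @pfn.
 * Falls back to a domain-selective flush when the hardware lacks PSI
 * support or the rounded-up range exceeds the maximum address mask.
 * Device IOTLBs are flushed as well, except when a map operation runs in
 * caching mode (non-present to present changes need no device-IOTLB flush).
 */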
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}


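/*
 * Allocate the per-IOMMU domain-id bitmap and the array of domain pointers,
 * both sized from cap_ndoms().  Domain id 0 is pre-allocated when caching
 * mode is set, since hardware tags invalid translations with that id.
 */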
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

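/*
 * Allocate and zero a dmar_domain.  Virtual-machine domains get their id
 * from a private counter here; other domains receive an id later, when
 * they are attached to a specific IOMMU.
 */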
static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}

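/*
 * Reserve a free domain id on @iommu for @domain and publish the domain in
 * the IOMMU's domains[] array.  Returns the id, or -ENOSPC when the
 * hardware domain-id space is exhausted.  Caller holds iommu->lock.
 */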
static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("IOMMU: no free domain ids\n");

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

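/*
 * Track which IOMMUs a domain is mapped on: domain_attach_iommu() sets the
 * IOMMU's bit in domain->iommu_bmp and bumps iommu_count (taking the node
 * from the first IOMMU), domain_detach_iommu() clears it and returns the
 * remaining count so callers can tell when the last reference went away.
 */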
static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

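/*
 * Round a guest address width up to the next width the page-table layout
 * can express (12 bits plus a multiple of 9, capped at 64).  For example,
 * gaw = 35 gives r = (35 - 12) % 9 = 5, so agaw = 35 + 9 - 5 = 39;
 * gaw = 48 is already aligned and is returned unchanged.
 */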
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
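/*
 * For example, host_addr = 0x1001 with size = 0x1000 spans the 4KiB pages
 * at 0x1000 and 0x2000, so this returns 2 even though size is one page.
 */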
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

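/*
 * Core mapping routine: walk @nr_pages of IOVA space starting at @iov_pfn
 * and fill in PTEs either from a scatterlist (@sg) or from a contiguous
 * physical range starting at @phys_pfn.  Superpage PTEs are used whenever
 * both addresses and the remaining length allow it, and the CPU cache is
 * flushed for each page-table page worth of PTEs that gets written.
 */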
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct device->archdata.iommu stores the info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

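/*
 * Find the dmar_domain a device should use for DMA mapping.  An existing
 * domain attached to the device, or to its topmost PCI DMA alias, is
 * reused; otherwise a new domain is allocated, attached to the device's
 * IOMMU, initialised, and registered for both the alias and the device.
 */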
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, uses it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

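/*
 * Set up a 1:1 mapping of [start, end] for @dev, typically for an RMRR.
 * Hardware passthrough devices already see all of memory, so the request
 * is ignored for them; otherwise the range is checked for a backwards or
 * over-wide RMRR (a BIOS bug) before being identity-mapped, and the
 * device's context entry is then set up.
 */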
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

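/*
 * Build the static identity (si) domain that backs passthrough mode.  It is
 * attached to every active IOMMU under a single shared domain id, and for
 * software passthrough (hw == 0) every usable memory range of every online
 * node is identity-mapped into it.
 */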
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

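/*
 * Bind @dev to an existing @domain: record the device in the domain's
 * device list and program its context entries.  On context-mapping failure
 * the device is unlinked again and the error is returned.
 */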
2502static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002503 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002504{
David Woodhouse0ac72662014-03-09 13:19:22 -07002505 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002506 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002507 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002508 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002509
David Woodhouse5913c9b2014-03-09 16:27:31 -07002510 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002511 if (!iommu)
2512 return -ENODEV;
2513
David Woodhouse5913c9b2014-03-09 16:27:31 -07002514 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002515 if (ndomain != domain)
2516 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002517
David Woodhouse5913c9b2014-03-09 16:27:31 -07002518 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002519 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002520 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002521 return ret;
2522 }
2523
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002524 return 0;
2525}
2526
David Woodhouse0b9d9752014-03-09 15:48:15 -07002527static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002528{
2529 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002530 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002531 int i;
2532
Jiang Liu0e242612014-02-19 14:07:34 +08002533 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002534 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002535 /*
2536 * Return TRUE if this RMRR contains the device that
2537 * is passed in.
2538 */
2539 for_each_active_dev_scope(rmrr->devices,
2540 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002541 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002542 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002543 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002544 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002545 }
Jiang Liu0e242612014-02-19 14:07:34 +08002546 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002547 return false;
2548}
2549
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002550/*
2551 * There are a couple cases where we need to restrict the functionality of
2552 * devices associated with RMRRs. The first is when evaluating a device for
2553 * identity mapping because problems exist when devices are moved in and out
2554 * of domains and their respective RMRR information is lost. This means that
2555 * a device with associated RMRRs will never be in a "passthrough" domain.
2556 * The second is use of the device through the IOMMU API. This interface
2557 * expects to have full control of the IOVA space for the device. We cannot
2558 * satisfy both the requirement that RMRR access is maintained and have an
2559 * unencumbered IOVA space. We also have no ability to quiesce the device's
2560 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2561 * We therefore prevent devices associated with an RMRR from participating in
2562 * the IOMMU API, which eliminates them from device assignment.
2563 *
2564 * In both cases we assume that PCI USB devices with RMRRs have them largely
2565 * for historical reasons and that the RMRR space is not actively used post
2566 * boot. This exclusion may change if vendors begin to abuse it.
2567 */
2568static bool device_is_rmrr_locked(struct device *dev)
2569{
2570 if (!device_has_rmrr(dev))
2571 return false;
2572
2573 if (dev_is_pci(dev)) {
2574 struct pci_dev *pdev = to_pci_dev(dev);
2575
2576 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2577 return false;
2578 }
2579
2580 return true;
2581}
2582
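/*
 * Decide whether a device should live in the static identity (1:1) domain.
 * Devices tied to an RMRR never do (PCI USB being the historical exception);
 * Azalia audio and graphics devices do when the corresponding IDENTMAP flag
 * is set; everything else follows IDENTMAP_ALL, excluding legacy PCI devices
 * behind bridges (which share a source-id) and, after boot, devices whose
 * DMA mask cannot reach all of memory.
 */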
David Woodhouse3bdb2592014-03-09 16:03:08 -07002583static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002584{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002585
David Woodhouse3bdb2592014-03-09 16:03:08 -07002586 if (dev_is_pci(dev)) {
2587 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002588
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002589 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002590 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002591
David Woodhouse3bdb2592014-03-09 16:03:08 -07002592 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2593 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002594
David Woodhouse3bdb2592014-03-09 16:03:08 -07002595 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2596 return 1;
2597
2598 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2599 return 0;
2600
2601 /*
2602 * We want to start off with all devices in the 1:1 domain, and
2603 * take them out later if we find they can't access all of memory.
2604 *
2605 * However, we can't do this for PCI devices behind bridges,
2606 * because all PCI devices behind the same bridge will end up
2607 * with the same source-id on their transactions.
2608 *
2609 * Practically speaking, we can't change things around for these
2610 * devices at run-time, because we can't be sure there'll be no
2611 * DMA transactions in flight for any of their siblings.
2612 *
2613 * So PCI devices (unless they're on the root bus) as well as
2614 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2615 * the 1:1 domain, just in _case_ one of their siblings turns out
2616 * not to be able to map all of memory.
2617 */
2618 if (!pci_is_pcie(pdev)) {
2619 if (!pci_is_root_bus(pdev->bus))
2620 return 0;
2621 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2622 return 0;
2623 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2624 return 0;
2625 } else {
2626 if (device_has_rmrr(dev))
2627 return 0;
2628 }
David Woodhouse6941af22009-07-04 18:24:27 +01002629
David Woodhouse3dfc8132009-07-04 19:11:08 +01002630 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002631 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002632 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002633 * take them out of the 1:1 domain later.
2634 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002635 if (!startup) {
2636 /*
2637 * If the device's dma_mask is less than the system's memory
2638 * size then this is not a candidate for identity mapping.
2639 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002640 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002641
David Woodhouse3bdb2592014-03-09 16:03:08 -07002642 if (dev->coherent_dma_mask &&
2643 dev->coherent_dma_mask < dma_mask)
2644 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002645
David Woodhouse3bdb2592014-03-09 16:03:08 -07002646 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002647 }
David Woodhouse6941af22009-07-04 18:24:27 +01002648
2649 return 1;
2650}
2651
David Woodhousecf04eee2014-03-21 16:49:04 +00002652static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2653{
2654 int ret;
2655
2656 if (!iommu_should_identity_map(dev, 1))
2657 return 0;
2658
2659 ret = domain_add_dev_info(si_domain, dev,
2660 hw ? CONTEXT_TT_PASS_THROUGH :
2661 CONTEXT_TT_MULTI_LEVEL);
2662 if (!ret)
2663 pr_info("IOMMU: %s identity mapping for device %s\n",
2664 hw ? "hardware" : "software", dev_name(dev));
2665 else if (ret == -ENODEV)
2666 /* device not associated with an iommu */
2667 ret = 0;
2668
2669 return ret;
2670}
2671
2672
Matt Kraai071e1372009-08-23 22:30:22 -07002673static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002674{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002675 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002676 struct dmar_drhd_unit *drhd;
2677 struct intel_iommu *iommu;
2678 struct device *dev;
2679 int i;
2680 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002681
David Woodhouse19943b02009-08-04 16:19:20 +01002682 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002683 if (ret)
2684 return -EFAULT;
2685
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002686 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002687 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2688 if (ret)
2689 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002690 }
2691
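	/*
	 * ACPI namespace devices listed in a DRHD device scope have no PCI
	 * identity of their own; walk each one's physical node list and set
	 * up identity mappings for the companion devices instead.
	 */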
David Woodhousecf04eee2014-03-21 16:49:04 +00002692 for_each_active_iommu(iommu, drhd)
2693 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2694 struct acpi_device_physical_node *pn;
2695 struct acpi_device *adev;
2696
2697 if (dev->bus != &acpi_bus_type)
2698 continue;
2699
2700			adev = to_acpi_device(dev);
2701 mutex_lock(&adev->physical_node_lock);
2702 list_for_each_entry(pn, &adev->physical_node_list, node) {
2703 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2704 if (ret)
2705 break;
2706 }
2707 mutex_unlock(&adev->physical_node_lock);
2708 if (ret)
2709 return ret;
2710 }
2711
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002712 return 0;
2713}
2714
Jiang Liuffebeb42014-11-09 22:48:02 +08002715static void intel_iommu_init_qi(struct intel_iommu *iommu)
2716{
2717 /*
2718	 * Start from a sane IOMMU hardware state.
2719	 * If queued invalidation was already initialized by us
2720	 * (for example, while enabling interrupt remapping) then
2721	 * things are already rolling from a sane state.
2722 */
2723 if (!iommu->qi) {
2724 /*
2725 * Clear any previous faults.
2726 */
2727 dmar_fault(-1, iommu);
2728 /*
2729 * Disable queued invalidation if supported and already enabled
2730 * before OS handover.
2731 */
2732 dmar_disable_qi(iommu);
2733 }
2734
2735 if (dmar_enable_qi(iommu)) {
2736 /*
2737 * Queued Invalidate not enabled, use Register Based Invalidate
2738 */
2739 iommu->flush.flush_context = __iommu_flush_context;
2740 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2741 pr_info("IOMMU: %s using Register based invalidation\n",
2742 iommu->name);
2743 } else {
2744 iommu->flush.flush_context = qi_flush_context;
2745 iommu->flush.flush_iotlb = qi_flush_iotlb;
2746 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2747 }
2748}
2749
Joseph Cihulab7792602011-05-03 00:08:37 -07002750static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002751{
2752 struct dmar_drhd_unit *drhd;
2753 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002754 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002755 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002756 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002757
2758 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002759 * for each drhd
2760 * allocate root
2761 * initialize and program root entry to not present
2762 * endfor
2763 */
2764 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002765 /*
2766		 * Lock not needed: this is only incremented in the single-
2767		 * threaded kernel __init code path; all other accesses are
2768		 * read only.
2769 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002770 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002771 g_num_of_iommus++;
2772 continue;
2773 }
2774 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002775 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002776 }
2777
Jiang Liuffebeb42014-11-09 22:48:02 +08002778 /* Preallocate enough resources for IOMMU hot-addition */
2779 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2780 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2781
Weidong Hand9630fe2008-12-08 11:06:32 +08002782 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2783 GFP_KERNEL);
2784 if (!g_iommus) {
2785 printk(KERN_ERR "Allocating global iommu array failed\n");
2786 ret = -ENOMEM;
2787 goto error;
2788 }
2789
mark gross80b20dd2008-04-18 13:53:58 -07002790 deferred_flush = kzalloc(g_num_of_iommus *
2791 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2792 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002793 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002794 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002795 }
2796
Jiang Liu7c919772014-01-06 14:18:18 +08002797 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002798 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002799
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002800 ret = iommu_init_domains(iommu);
2801 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002802 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002803
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002804 /*
2805 * TBD:
2806 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002807	 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002808 */
2809 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002810 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002811 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002812 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002813 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002814 }
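	/*
	 * hw_pass_through stays set only if every IOMMU advertises the
	 * pass-through capability; a single unit without it means identity
	 * mappings must use real 1:1 page tables rather than pass-through
	 * context entries.
	 */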
2815
Jiang Liuffebeb42014-11-09 22:48:02 +08002816 for_each_active_iommu(iommu, drhd)
2817 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002818
David Woodhouse19943b02009-08-04 16:19:20 +01002819 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002820 iommu_identity_mapping |= IDENTMAP_ALL;
2821
Suresh Siddhad3f13812011-08-23 17:05:25 -07002822#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002823 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002824#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002825
2826 check_tylersburg_isoch();
2827
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002828 /*
2829	 * If pass-through is not set or not enabled, set up context entries
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002830	 * for identity mappings for RMRR, graphics and ISA devices, and fall
2831	 * back to static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002832 */
David Woodhouse19943b02009-08-04 16:19:20 +01002833 if (iommu_identity_mapping) {
2834 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2835 if (ret) {
2836 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002837 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002838 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002839 }
David Woodhouse19943b02009-08-04 16:19:20 +01002840 /*
2841 * For each rmrr
2842 * for each dev attached to rmrr
2843 * do
2844 * locate drhd for dev, alloc domain for dev
2845 * allocate free domain
2846 * allocate page table entries for rmrr
2847 * if context not allocated for bus
2848 * allocate and init context
2849 * set present in root table for this bus
2850 * init context with domain, translation etc
2851 * endfor
2852 * endfor
2853 */
2854 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2855 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002856		/* some BIOSes list non-existent devices in the DMAR table. */
2857 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002858 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002859 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002860 if (ret)
2861 printk(KERN_ERR
2862 "IOMMU: mapping reserved region failed\n");
2863 }
2864 }
2865
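	/*
	 * Identity-map the first 16MiB for the ISA/LPC bridge so that
	 * legacy device DMA (e.g. the floppy controller) keeps working.
	 */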
2866 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002867
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002868 /*
2869 * for each drhd
2870 * enable fault log
2871 * global invalidate context cache
2872 * global invalidate iotlb
2873 * enable translation
2874 */
Jiang Liu7c919772014-01-06 14:18:18 +08002875 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002876 if (drhd->ignored) {
2877 /*
2878 * we always have to disable PMRs or DMA may fail on
2879 * this device
2880 */
2881 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002882 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002883 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002884 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002885
2886 iommu_flush_write_buffer(iommu);
2887
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002888 ret = dmar_set_interrupt(iommu);
2889 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002890 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002891
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002892 iommu_set_root_entry(iommu);
2893
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002894 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002895 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002896 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002897 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002898 }
2899
2900 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002901
2902free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002903 for_each_active_iommu(iommu, drhd) {
2904 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002905 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002906 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002907 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002908free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002909 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002910error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002911 return ret;
2912}
2913
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002914/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002915static struct iova *intel_alloc_iova(struct device *dev,
2916 struct dmar_domain *domain,
2917 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002918{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002919 struct iova *iova = NULL;
2920
David Woodhouse875764d2009-06-28 21:20:51 +01002921 /* Restrict dma_mask to the width that the iommu can handle */
2922 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2923
2924 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002925 /*
2926		 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002927		 * DMA_BIT_MASK(32); if that fails, try allocating from
Joe Perches36098012007-12-17 11:40:11 -08002928		 * the higher range.
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002929 */
David Woodhouse875764d2009-06-28 21:20:51 +01002930 iova = alloc_iova(&domain->iovad, nrpages,
2931 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2932 if (iova)
2933 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002934 }
David Woodhouse875764d2009-06-28 21:20:51 +01002935 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2936 if (unlikely(!iova)) {
2937 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002938 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002939 return NULL;
2940 }
2941
2942 return iova;
2943}
2944
David Woodhoused4b709f2014-03-09 16:07:40 -07002945static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002946{
2947 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002949
David Woodhoused4b709f2014-03-09 16:07:40 -07002950 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002951 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002952 printk(KERN_ERR "Allocating domain for %s failed",
2953 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002954 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002955 }
2956
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002957 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002958 if (unlikely(!domain_context_mapped(dev))) {
2959 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002960 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002961 printk(KERN_ERR "Domain context map for %s failed",
2962 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002963 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002964 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965 }
2966
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002967 return domain;
2968}
2969
David Woodhoused4b709f2014-03-09 16:07:40 -07002970static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002971{
2972 struct device_domain_info *info;
2973
2974 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002975 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002976 if (likely(info))
2977 return info->domain;
2978
2979 return __get_valid_domain_for_dev(dev);
2980}
2981
David Woodhouse3d891942014-03-06 15:59:26 +00002982static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002983{
David Woodhouse3d891942014-03-06 15:59:26 +00002984 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002985}
2986
David Woodhouseecb509e2014-03-09 16:29:55 -07002987/*
 * Check if the dev needs to go through the non-identity map and unmap
 * process. Returns 1 when DMA for this device bypasses IOMMU translation
 * (dummy or identity-mapped device), 0 when it must be mapped.
 */
David Woodhouse73676832009-07-04 14:08:36 +01002988static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002989{
2990 int found;
2991
David Woodhouse3d891942014-03-06 15:59:26 +00002992 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002993 return 1;
2994
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002995 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002996 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002997
David Woodhouse9b226622014-03-09 14:03:28 -07002998 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002999 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003000 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003001 return 1;
3002 else {
3003 /*
3004			 * A 32 bit DMA device is removed from si_domain and falls
3005			 * back to non-identity mapping.
3006 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003007 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003008 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003009 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003010 return 0;
3011 }
3012 } else {
3013 /*
3014			 * A 64 bit DMA device detached from a VM is put back
3015			 * into si_domain for identity mapping.
3016 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003017 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003018 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003019 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003020 hw_pass_through ?
3021 CONTEXT_TT_PASS_THROUGH :
3022 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003023 if (!ret) {
3024 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003025 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003026 return 1;
3027 }
3028 }
3029 }
3030
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003031 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003032}
3033
David Woodhouse5040a912014-03-09 16:14:00 -07003034static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003035 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003036{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003037 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003038 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003039 struct iova *iova;
3040 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003041 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003042 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003043 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003044
3045 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003046
David Woodhouse5040a912014-03-09 16:14:00 -07003047 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003048 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049
David Woodhouse5040a912014-03-09 16:14:00 -07003050 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051 if (!domain)
3052 return 0;
3053
Weidong Han8c11e792008-12-08 15:29:22 +08003054 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003055 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003056
David Woodhouse5040a912014-03-09 16:14:00 -07003057 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003058 if (!iova)
3059 goto error;
3060
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061 /*
3062 * Check if DMAR supports zero-length reads on write only
3063 * mappings..
3064 */
3065 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003066 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003067 prot |= DMA_PTE_READ;
3068 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3069 prot |= DMA_PTE_WRITE;
3070 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003071	 * paddr .. (paddr + size) might cover only part of a page; map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003072	 * whole page. Note: if two parts of one page are mapped separately,
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003073	 * we might end up with two guest addresses mapping to the same host
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003074	 * paddr, but this is not a big problem.
3075 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003076 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003077 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003078 if (ret)
3079 goto error;
3080
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003081 /* it's a non-present to present mapping. Only flush if caching mode */
3082 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003083 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003084 else
Weidong Han8c11e792008-12-08 15:29:22 +08003085 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003086
David Woodhouse03d6a242009-06-28 15:33:46 +01003087 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3088 start_paddr += paddr & ~PAGE_MASK;
3089 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003090
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003091error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003092 if (iova)
3093 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003094 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003095 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096 return 0;
3097}
3098
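/*
 * intel_map_page() below is the ->map_page hook reached from the generic
 * DMA API (dma_map_single()/dma_map_page()); it turns page + offset into a
 * physical address and hands it to __intel_map_single() above.
 */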
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003099static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3100 unsigned long offset, size_t size,
3101 enum dma_data_direction dir,
3102 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003103{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003104 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003105 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003106}
3107
mark gross5e0d2a62008-03-04 15:22:08 -08003108static void flush_unmaps(void)
3109{
mark gross80b20dd2008-04-18 13:53:58 -07003110 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003111
mark gross5e0d2a62008-03-04 15:22:08 -08003112 timer_on = 0;
3113
3114 /* just flush them all */
3115 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003116 struct intel_iommu *iommu = g_iommus[i];
3117 if (!iommu)
3118 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003119
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003120 if (!deferred_flush[i].next)
3121 continue;
3122
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003123		/* In caching mode, global flushes make emulation expensive */
3124 if (!cap_caching_mode(iommu->cap))
3125 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003126 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003127 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003128 unsigned long mask;
3129 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003130 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003131
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003132 /* On real hardware multiple invalidations are expensive */
3133 if (cap_caching_mode(iommu->cap))
3134 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003135 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003136 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003137 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003138 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003139 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3140 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3141 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003142 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003143 if (deferred_flush[i].freelist[j])
3144 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003145 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003146 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003147 }
3148
mark gross5e0d2a62008-03-04 15:22:08 -08003149 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003150}
3151
3152static void flush_unmaps_timeout(unsigned long data)
3153{
mark gross80b20dd2008-04-18 13:53:58 -07003154 unsigned long flags;
3155
3156 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003157 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003158 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003159}
3160
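/*
 * Deferred unmap batching: freed IOVAs are queued per IOMMU and the IOTLB
 * is flushed either when HIGH_WATER_MARK entries have accumulated or when
 * the 10ms unmap_timer fires. This amortizes the flush cost at the price
 * of a short window in which stale translations remain valid; booting with
 * intel_iommu=strict bypasses the batching.
 */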
David Woodhouseea8ea462014-03-05 17:09:32 +00003161static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003162{
3163 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003164 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003165 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003166
3167 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003168 if (list_size == HIGH_WATER_MARK)
3169 flush_unmaps();
3170
Weidong Han8c11e792008-12-08 15:29:22 +08003171 iommu = domain_get_iommu(dom);
3172 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003173
mark gross80b20dd2008-04-18 13:53:58 -07003174 next = deferred_flush[iommu_id].next;
3175 deferred_flush[iommu_id].domain[next] = dom;
3176 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003177 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003178 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003179
3180 if (!timer_on) {
3181 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3182 timer_on = 1;
3183 }
3184 list_size++;
3185 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3186}
3187
Jiang Liud41a4ad2014-07-11 14:19:34 +08003188static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003189{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003191 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003192 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003193 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003194 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003195
David Woodhouse73676832009-07-04 14:08:36 +01003196 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003197 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003198
David Woodhouse1525a292014-03-06 16:19:30 +00003199 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003200 BUG_ON(!domain);
3201
Weidong Han8c11e792008-12-08 15:29:22 +08003202 iommu = domain_get_iommu(domain);
3203
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003204 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003205 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3206 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003207 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003208
David Woodhoused794dc92009-06-28 00:27:49 +01003209 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3210 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003211
David Woodhoused794dc92009-06-28 00:27:49 +01003212 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003213 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003214
David Woodhouseea8ea462014-03-05 17:09:32 +00003215 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003216
mark gross5e0d2a62008-03-04 15:22:08 -08003217 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003218 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003219 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003220 /* free iova */
3221 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003222 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003223 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003224 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003225 /*
3226		 * Queue up the release of the unmap to save the 1/6th of
3227		 * CPU time used up by the iotlb flush operation...
3228 */
mark gross5e0d2a62008-03-04 15:22:08 -08003229 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003230}
3231
Jiang Liud41a4ad2014-07-11 14:19:34 +08003232static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3233 size_t size, enum dma_data_direction dir,
3234 struct dma_attrs *attrs)
3235{
3236 intel_unmap(dev, dev_addr);
3237}
3238
David Woodhouse5040a912014-03-09 16:14:00 -07003239static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003240 dma_addr_t *dma_handle, gfp_t flags,
3241 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003242{
Akinobu Mita36746432014-06-04 16:06:51 -07003243 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244 int order;
3245
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003246 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003248
David Woodhouse5040a912014-03-09 16:14:00 -07003249 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003250 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003251 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3252 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003253 flags |= GFP_DMA;
3254 else
3255 flags |= GFP_DMA32;
3256 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003257
Akinobu Mita36746432014-06-04 16:06:51 -07003258 if (flags & __GFP_WAIT) {
3259 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003260
Akinobu Mita36746432014-06-04 16:06:51 -07003261 page = dma_alloc_from_contiguous(dev, count, order);
3262 if (page && iommu_no_mapping(dev) &&
3263 page_to_phys(page) + size > dev->coherent_dma_mask) {
3264 dma_release_from_contiguous(dev, page, count);
3265 page = NULL;
3266 }
3267 }
3268
3269 if (!page)
3270 page = alloc_pages(flags, order);
3271 if (!page)
3272 return NULL;
3273 memset(page_address(page), 0, size);
3274
3275 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003276 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003277 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003278 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003279 return page_address(page);
3280 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3281 __free_pages(page, order);
3282
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003283 return NULL;
3284}
3285
David Woodhouse5040a912014-03-09 16:14:00 -07003286static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003287 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003288{
3289 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003290 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003291
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003292 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003293 order = get_order(size);
3294
Jiang Liud41a4ad2014-07-11 14:19:34 +08003295 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003296 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3297 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298}
3299
David Woodhouse5040a912014-03-09 16:14:00 -07003300static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003301 int nelems, enum dma_data_direction dir,
3302 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003303{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003304 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003305}
3306
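/*
 * For devices that bypass translation, scatterlist entries are simply
 * filled in with the physical address of each page; no IOVA is allocated
 * and no page tables are touched.
 */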
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003307static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003308 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003309{
3310 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003311 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003312
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003313 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003314 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003315 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003316 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003317 }
3318 return nelems;
3319}
3320
David Woodhouse5040a912014-03-09 16:14:00 -07003321static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003322 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003324 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003325 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003326 size_t size = 0;
3327 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 struct iova *iova = NULL;
3329 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003330 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003331 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003332 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003333
3334 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003335 if (iommu_no_mapping(dev))
3336 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003337
David Woodhouse5040a912014-03-09 16:14:00 -07003338 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003339 if (!domain)
3340 return 0;
3341
Weidong Han8c11e792008-12-08 15:29:22 +08003342 iommu = domain_get_iommu(domain);
3343
David Woodhouseb536d242009-06-28 14:49:31 +01003344 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003345 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003346
David Woodhouse5040a912014-03-09 16:14:00 -07003347 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3348 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003349 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003350 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003351 return 0;
3352 }
3353
3354 /*
3355 * Check if DMAR supports zero-length reads on write only
3356 * mappings..
3357 */
3358 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003359 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003360 prot |= DMA_PTE_READ;
3361 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3362 prot |= DMA_PTE_WRITE;
3363
David Woodhouseb536d242009-06-28 14:49:31 +01003364 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003365
Fenghua Yuf5329592009-08-04 15:09:37 -07003366 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003367 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003368 dma_pte_free_pagetable(domain, start_vpfn,
3369 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003370 __free_iova(&domain->iovad, iova);
3371 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003372 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003373
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003374 /* it's a non-present to present mapping. Only flush if caching mode */
3375 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003376 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003377 else
Weidong Han8c11e792008-12-08 15:29:22 +08003378 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003379
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003380 return nelems;
3381}
3382
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003383static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3384{
3385 return !dma_addr;
3386}
3387
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003388struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003389 .alloc = intel_alloc_coherent,
3390 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003391 .map_sg = intel_map_sg,
3392 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003393 .map_page = intel_map_page,
3394 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003395 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003396};
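/*
 * intel_dma_ops is installed as the platform's dma_map_ops, so the generic
 * DMA API is routed through the IOMMU transparently. For example, a driver
 * doing
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * ends up in intel_map_page()/intel_unmap_page() above.
 */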
3397
3398static inline int iommu_domain_cache_init(void)
3399{
3400 int ret = 0;
3401
3402 iommu_domain_cache = kmem_cache_create("iommu_domain",
3403 sizeof(struct dmar_domain),
3404 0,
3405 SLAB_HWCACHE_ALIGN,
3407						NULL);
3408 if (!iommu_domain_cache) {
3409 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3410 ret = -ENOMEM;
3411 }
3412
3413 return ret;
3414}
3415
3416static inline int iommu_devinfo_cache_init(void)
3417{
3418 int ret = 0;
3419
3420 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3421 sizeof(struct device_domain_info),
3422 0,
3423 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003424 NULL);
3425 if (!iommu_devinfo_cache) {
3426 printk(KERN_ERR "Couldn't create devinfo cache\n");
3427 ret = -ENOMEM;
3428 }
3429
3430 return ret;
3431}
3432
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003433static int __init iommu_init_mempool(void)
3434{
3435 int ret;
3436 ret = iommu_iova_cache_init();
3437 if (ret)
3438 return ret;
3439
3440 ret = iommu_domain_cache_init();
3441 if (ret)
3442 goto domain_error;
3443
3444 ret = iommu_devinfo_cache_init();
3445 if (!ret)
3446 return ret;
3447
3448 kmem_cache_destroy(iommu_domain_cache);
3449domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003450 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003451
3452 return -ENOMEM;
3453}
3454
3455static void __init iommu_exit_mempool(void)
3456{
3457 kmem_cache_destroy(iommu_devinfo_cache);
3458 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003459 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003460}
3461
Dan Williams556ab452010-07-23 15:47:56 -07003462static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3463{
3464 struct dmar_drhd_unit *drhd;
3465 u32 vtbar;
3466 int rc;
3467
3468 /* We know that this device on this chipset has its own IOMMU.
3469 * If we find it under a different IOMMU, then the BIOS is lying
3470 * to us. Hope that the IOMMU for this device is actually
3471 * disabled, and it needs no translation...
3472 */
3473 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3474 if (rc) {
3475 /* "can't" happen */
3476 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3477 return;
3478 }
3479 vtbar &= 0xffff0000;
3480
3481	/* we know that this iommu should be at offset 0xa000 from vtbar */
3482 drhd = dmar_find_matched_drhd_unit(pdev);
3483 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3484 TAINT_FIRMWARE_WORKAROUND,
3485 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3486 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3487}
3488DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3489
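/*
 * A DRHD unit with an empty device scope, or one that covers only graphics
 * devices while dmar_map_gfx is disabled, is marked ignored; in the
 * graphics case its devices are tagged with DUMMY_DEVICE_DOMAIN_INFO so
 * the DMA API bypasses translation for them.
 */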
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003490static void __init init_no_remapping_devices(void)
3491{
3492 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003493 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003494 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495
3496 for_each_drhd_unit(drhd) {
3497 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003498 for_each_active_dev_scope(drhd->devices,
3499 drhd->devices_cnt, i, dev)
3500 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003501 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003502 if (i == drhd->devices_cnt)
3503 drhd->ignored = 1;
3504 }
3505 }
3506
Jiang Liu7c919772014-01-06 14:18:18 +08003507 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003508 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003509 continue;
3510
Jiang Liub683b232014-02-19 14:07:32 +08003511 for_each_active_dev_scope(drhd->devices,
3512 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003513 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515 if (i < drhd->devices_cnt)
3516 continue;
3517
David Woodhousec0771df2011-10-14 20:59:46 +01003518 /* This IOMMU has *only* gfx devices. Either bypass it or
3519 set the gfx_mapped flag, as appropriate */
3520 if (dmar_map_gfx) {
3521 intel_iommu_gfx_mapped = 1;
3522 } else {
3523 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003524 for_each_active_dev_scope(drhd->devices,
3525 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003526 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003527 }
3528 }
3529}
3530
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003531#ifdef CONFIG_SUSPEND
3532static int init_iommu_hw(void)
3533{
3534 struct dmar_drhd_unit *drhd;
3535 struct intel_iommu *iommu = NULL;
3536
3537 for_each_active_iommu(iommu, drhd)
3538 if (iommu->qi)
3539 dmar_reenable_qi(iommu);
3540
Joseph Cihulab7792602011-05-03 00:08:37 -07003541 for_each_iommu(iommu, drhd) {
3542 if (drhd->ignored) {
3543 /*
3544 * we always have to disable PMRs or DMA may fail on
3545 * this device
3546 */
3547 if (force_on)
3548 iommu_disable_protect_mem_regions(iommu);
3549 continue;
3550 }
3551
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003552 iommu_flush_write_buffer(iommu);
3553
3554 iommu_set_root_entry(iommu);
3555
3556 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003557 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003558 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3559 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003560 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003561 }
3562
3563 return 0;
3564}
3565
3566static void iommu_flush_all(void)
3567{
3568 struct dmar_drhd_unit *drhd;
3569 struct intel_iommu *iommu;
3570
3571 for_each_active_iommu(iommu, drhd) {
3572 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003573 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003574 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003575 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003576 }
3577}
3578
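/*
 * Across suspend/resume only the fault-event registers (FECTL, FEDATA,
 * FEADDR, FEUADDR) need to be saved and restored by software; the root and
 * context tables live in ordinary memory, so resume re-programs the root
 * pointer, performs global flushes and re-enables translation before
 * restoring them.
 */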
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003579static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003580{
3581 struct dmar_drhd_unit *drhd;
3582 struct intel_iommu *iommu = NULL;
3583 unsigned long flag;
3584
3585 for_each_active_iommu(iommu, drhd) {
3586 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3587 GFP_ATOMIC);
3588 if (!iommu->iommu_state)
3589 goto nomem;
3590 }
3591
3592 iommu_flush_all();
3593
3594 for_each_active_iommu(iommu, drhd) {
3595 iommu_disable_translation(iommu);
3596
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003597 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003598
3599 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3600 readl(iommu->reg + DMAR_FECTL_REG);
3601 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3602 readl(iommu->reg + DMAR_FEDATA_REG);
3603 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3604 readl(iommu->reg + DMAR_FEADDR_REG);
3605 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3606 readl(iommu->reg + DMAR_FEUADDR_REG);
3607
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003608 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003609 }
3610 return 0;
3611
3612nomem:
3613 for_each_active_iommu(iommu, drhd)
3614 kfree(iommu->iommu_state);
3615
3616 return -ENOMEM;
3617}
3618
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003619static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003620{
3621 struct dmar_drhd_unit *drhd;
3622 struct intel_iommu *iommu = NULL;
3623 unsigned long flag;
3624
3625 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003626 if (force_on)
3627 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3628 else
3629 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003630 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003631 }
3632
3633 for_each_active_iommu(iommu, drhd) {
3634
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003635 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003636
3637 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3638 iommu->reg + DMAR_FECTL_REG);
3639 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3640 iommu->reg + DMAR_FEDATA_REG);
3641 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3642 iommu->reg + DMAR_FEADDR_REG);
3643 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3644 iommu->reg + DMAR_FEUADDR_REG);
3645
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003646 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003647 }
3648
3649 for_each_active_iommu(iommu, drhd)
3650 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651}
3652
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003653static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003654 .resume = iommu_resume,
3655 .suspend = iommu_suspend,
3656};
3657
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003658static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003659{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003660 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003661}
3662
3663#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003664static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003665#endif /* CONFIG_SUSPEND */
3666
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003667
Jiang Liuc2a0b532014-11-09 22:47:56 +08003668int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003669{
3670 struct acpi_dmar_reserved_memory *rmrr;
3671 struct dmar_rmrr_unit *rmrru;
3672
3673 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3674 if (!rmrru)
3675 return -ENOMEM;
3676
3677 rmrru->hdr = header;
3678 rmrr = (struct acpi_dmar_reserved_memory *)header;
3679 rmrru->base_address = rmrr->base_address;
3680 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003681 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3682 ((void *)rmrr) + rmrr->header.length,
3683 &rmrru->devices_cnt);
3684 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3685 kfree(rmrru);
3686 return -ENOMEM;
3687 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003688
Jiang Liu2e455282014-02-19 14:07:36 +08003689 list_add(&rmrru->list, &dmar_rmrr_units);
3690
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003691 return 0;
3692}
3693
Jiang Liu6b197242014-11-09 22:47:58 +08003694static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3695{
3696 struct dmar_atsr_unit *atsru;
3697 struct acpi_dmar_atsr *tmp;
3698
3699 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3700 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3701 if (atsr->segment != tmp->segment)
3702 continue;
3703 if (atsr->header.length != tmp->header.length)
3704 continue;
3705 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3706 return atsru;
3707 }
3708
3709 return NULL;
3710}
3711
3712int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003713{
3714 struct acpi_dmar_atsr *atsr;
3715 struct dmar_atsr_unit *atsru;
3716
Jiang Liu6b197242014-11-09 22:47:58 +08003717 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3718 return 0;
3719
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003720 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003721 atsru = dmar_find_atsr(atsr);
3722 if (atsru)
3723 return 0;
3724
3725 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003726 if (!atsru)
3727 return -ENOMEM;
3728
Jiang Liu6b197242014-11-09 22:47:58 +08003729 /*
3730 * If memory is allocated from slab by ACPI _DSM method, we need to
3731 * copy the memory content because the memory buffer will be freed
3732 * on return.
3733 */
3734 atsru->hdr = (void *)(atsru + 1);
3735 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003736 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003737 if (!atsru->include_all) {
3738 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3739 (void *)atsr + atsr->header.length,
3740 &atsru->devices_cnt);
3741 if (atsru->devices_cnt && atsru->devices == NULL) {
3742 kfree(atsru);
3743 return -ENOMEM;
3744 }
3745 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003746
Jiang Liu0e242612014-02-19 14:07:34 +08003747 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003748
3749 return 0;
3750}
3751
Jiang Liu9bdc5312014-01-06 14:18:27 +08003752static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3753{
3754 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3755 kfree(atsru);
3756}
3757
Jiang Liu6b197242014-11-09 22:47:58 +08003758int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3759{
3760 struct acpi_dmar_atsr *atsr;
3761 struct dmar_atsr_unit *atsru;
3762
3763 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3764 atsru = dmar_find_atsr(atsr);
3765 if (atsru) {
3766 list_del_rcu(&atsru->list);
3767 synchronize_rcu();
3768 intel_iommu_free_atsr(atsru);
3769 }
3770
3771 return 0;
3772}
3773
3774int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3775{
3776 int i;
3777 struct device *dev;
3778 struct acpi_dmar_atsr *atsr;
3779 struct dmar_atsr_unit *atsru;
3780
3781 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3782 atsru = dmar_find_atsr(atsr);
3783 if (!atsru)
3784 return 0;
3785
3786 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3787 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3788 i, dev)
3789 return -EBUSY;
3790
3791 return 0;
3792}
3793
Jiang Liuffebeb42014-11-09 22:48:02 +08003794static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3795{
3796 int sp, ret = 0;
3797 struct intel_iommu *iommu = dmaru->iommu;
3798
3799 if (g_iommus[iommu->seq_id])
3800 return 0;
3801
3802 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3803 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3804 iommu->name);
3805 return -ENXIO;
3806 }
3807 if (!ecap_sc_support(iommu->ecap) &&
3808 domain_update_iommu_snooping(iommu)) {
3809 pr_warn("IOMMU: %s doesn't support snooping.\n",
3810 iommu->name);
3811 return -ENXIO;
3812 }
3813 sp = domain_update_iommu_superpage(iommu) - 1;
3814 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3815		pr_warn("IOMMU: %s doesn't support large pages.\n",
3816 iommu->name);
3817 return -ENXIO;
3818 }
3819
3820 /*
3821 * Disable translation if already enabled prior to OS handover.
3822 */
3823 if (iommu->gcmd & DMA_GCMD_TE)
3824 iommu_disable_translation(iommu);
3825
3826 g_iommus[iommu->seq_id] = iommu;
3827 ret = iommu_init_domains(iommu);
3828 if (ret == 0)
3829 ret = iommu_alloc_root_entry(iommu);
3830 if (ret)
3831 goto out;
3832
3833 if (dmaru->ignored) {
3834 /*
3835 * we always have to disable PMRs or DMA may fail on this device
3836 */
3837 if (force_on)
3838 iommu_disable_protect_mem_regions(iommu);
3839 return 0;
3840 }
3841
3842 intel_iommu_init_qi(iommu);
3843 iommu_flush_write_buffer(iommu);
3844 ret = dmar_set_interrupt(iommu);
3845 if (ret)
3846 goto disable_iommu;
3847
3848 iommu_set_root_entry(iommu);
3849 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3850 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3851 iommu_enable_translation(iommu);
3852
3853 if (si_domain) {
3854 ret = iommu_attach_domain(si_domain, iommu);
3855 if (ret < 0 || si_domain->id != ret)
3856 goto disable_iommu;
3857 domain_attach_iommu(si_domain, iommu);
3858 }
3859
3860 iommu_disable_protect_mem_regions(iommu);
3861 return 0;
3862
3863disable_iommu:
3864 disable_dmar_iommu(iommu);
3865out:
3866 free_dmar_iommu(iommu);
3867 return ret;
3868}
3869
Jiang Liu6b197242014-11-09 22:47:58 +08003870int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3871{
Jiang Liuffebeb42014-11-09 22:48:02 +08003872 int ret = 0;
3873 struct intel_iommu *iommu = dmaru->iommu;
3874
3875 if (!intel_iommu_enabled)
3876 return 0;
3877 if (iommu == NULL)
3878 return -EINVAL;
3879
3880 if (insert) {
3881 ret = intel_iommu_add(dmaru);
3882 } else {
3883 disable_dmar_iommu(iommu);
3884 free_dmar_iommu(iommu);
3885 }
3886
3887 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003888}
3889
Jiang Liu9bdc5312014-01-06 14:18:27 +08003890static void intel_iommu_free_dmars(void)
3891{
3892 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3893 struct dmar_atsr_unit *atsru, *atsr_n;
3894
3895 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3896 list_del(&rmrru->list);
3897 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3898 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003899 }
3900
Jiang Liu9bdc5312014-01-06 14:18:27 +08003901 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3902 list_del(&atsru->list);
3903 intel_iommu_free_atsr(atsru);
3904 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003905}
3906
3907int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3908{
Jiang Liub683b232014-02-19 14:07:32 +08003909 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003910 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003911 struct pci_dev *bridge = NULL;
3912 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003913 struct acpi_dmar_atsr *atsr;
3914 struct dmar_atsr_unit *atsru;
3915
3916 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003917 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003918 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003919 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003920 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003921 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003922 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003923 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003924 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003925 if (!bridge)
3926 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003927
Jiang Liu0e242612014-02-19 14:07:34 +08003928 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003929 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3930 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3931 if (atsr->segment != pci_domain_nr(dev->bus))
3932 continue;
3933
Jiang Liub683b232014-02-19 14:07:32 +08003934 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003935 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003936 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003937
3938 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003939 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003940 }
Jiang Liub683b232014-02-19 14:07:32 +08003941 ret = 0;
3942out:
Jiang Liu0e242612014-02-19 14:07:34 +08003943 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003944
Jiang Liub683b232014-02-19 14:07:32 +08003945 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003946}
3947
Jiang Liu59ce0512014-02-19 14:07:35 +08003948int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3949{
3950 int ret = 0;
3951 struct dmar_rmrr_unit *rmrru;
3952 struct dmar_atsr_unit *atsru;
3953 struct acpi_dmar_atsr *atsr;
3954 struct acpi_dmar_reserved_memory *rmrr;
3955
3956 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3957 return 0;
3958
3959 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3960 rmrr = container_of(rmrru->hdr,
3961 struct acpi_dmar_reserved_memory, header);
3962 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3963 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3964 ((void *)rmrr) + rmrr->header.length,
3965 rmrr->segment, rmrru->devices,
3966 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003967			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003968 return ret;
3969 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003970 dmar_remove_dev_scope(info, rmrr->segment,
3971 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003972 }
3973 }
3974
3975 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3976 if (atsru->include_all)
3977 continue;
3978
3979 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3980 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3981 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3982 (void *)atsr + atsr->header.length,
3983 atsr->segment, atsru->devices,
3984 atsru->devices_cnt);
3985 if (ret > 0)
3986 break;
3987			else if (ret < 0)
3988 return ret;
3989 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3990 if (dmar_remove_dev_scope(info, atsr->segment,
3991 atsru->devices, atsru->devices_cnt))
3992 break;
3993 }
3994 }
3995
3996 return 0;
3997}
3998
Fenghua Yu99dcade2009-11-11 07:23:06 -08003999/*
4000 * Here we only respond to a device being removed from the bus.
4001 *
4002 * A newly added device is not attached to its DMAR domain here yet; that
4003 * happens when the device is first mapped to an iova.
4004 */
4005static int device_notifier(struct notifier_block *nb,
4006 unsigned long action, void *data)
4007{
4008 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004009 struct dmar_domain *domain;
4010
David Woodhouse3d891942014-03-06 15:59:26 +00004011 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004012 return 0;
4013
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004014 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004015 return 0;
4016
David Woodhouse1525a292014-03-06 16:19:30 +00004017 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004018 if (!domain)
4019 return 0;
4020
Jiang Liu3a5670e2014-02-19 14:07:33 +08004021 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004022 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004023 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004024 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004025 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004026
Fenghua Yu99dcade2009-11-11 07:23:06 -08004027 return 0;
4028}
4029
4030static struct notifier_block device_nb = {
4031 .notifier_call = device_notifier,
4032};
4033
Jiang Liu75f05562014-02-19 14:07:37 +08004034static int intel_iommu_memory_notifier(struct notifier_block *nb,
4035 unsigned long val, void *v)
4036{
4037 struct memory_notify *mhp = v;
4038 unsigned long long start, end;
4039 unsigned long start_vpfn, last_vpfn;
4040
4041 switch (val) {
4042 case MEM_GOING_ONLINE:
4043 start = mhp->start_pfn << PAGE_SHIFT;
4044 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4045 if (iommu_domain_identity_map(si_domain, start, end)) {
4046 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4047 start, end);
4048 return NOTIFY_BAD;
4049 }
4050 break;
4051
4052 case MEM_OFFLINE:
4053 case MEM_CANCEL_ONLINE:
4054 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4055 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4056 while (start_vpfn <= last_vpfn) {
4057 struct iova *iova;
4058 struct dmar_drhd_unit *drhd;
4059 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004060 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004061
4062 iova = find_iova(&si_domain->iovad, start_vpfn);
4063 if (iova == NULL) {
4064				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4065 start_vpfn);
4066 break;
4067 }
4068
4069 iova = split_and_remove_iova(&si_domain->iovad, iova,
4070 start_vpfn, last_vpfn);
4071 if (iova == NULL) {
4072 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4073 start_vpfn, last_vpfn);
4074 return NOTIFY_BAD;
4075 }
4076
David Woodhouseea8ea462014-03-05 17:09:32 +00004077 freelist = domain_unmap(si_domain, iova->pfn_lo,
4078 iova->pfn_hi);
4079
Jiang Liu75f05562014-02-19 14:07:37 +08004080 rcu_read_lock();
4081 for_each_active_iommu(iommu, drhd)
4082 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004083 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004084 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004085 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004086 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004087
4088 start_vpfn = iova->pfn_hi + 1;
4089 free_iova_mem(iova);
4090 }
4091 break;
4092 }
4093
4094 return NOTIFY_OK;
4095}
4096
4097static struct notifier_block intel_iommu_memory_nb = {
4098 .notifier_call = intel_iommu_memory_notifier,
4099 .priority = 0
4100};
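
/*
 * Illustrative note (not part of the original file): this notifier is only
 * registered when si_domain exists and hardware passthrough is not in use
 * (see intel_iommu_init()), so memory that comes online is added to the
 * static identity map and memory going offline has its IOVAs and page
 * tables in that map reclaimed.
 */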
4101
Alex Williamsona5459cf2014-06-12 16:12:31 -06004102
4103static ssize_t intel_iommu_show_version(struct device *dev,
4104 struct device_attribute *attr,
4105 char *buf)
4106{
4107 struct intel_iommu *iommu = dev_get_drvdata(dev);
4108 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4109 return sprintf(buf, "%d:%d\n",
4110 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4111}
4112static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4113
4114static ssize_t intel_iommu_show_address(struct device *dev,
4115 struct device_attribute *attr,
4116 char *buf)
4117{
4118 struct intel_iommu *iommu = dev_get_drvdata(dev);
4119 return sprintf(buf, "%llx\n", iommu->reg_phys);
4120}
4121static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4122
4123static ssize_t intel_iommu_show_cap(struct device *dev,
4124 struct device_attribute *attr,
4125 char *buf)
4126{
4127 struct intel_iommu *iommu = dev_get_drvdata(dev);
4128 return sprintf(buf, "%llx\n", iommu->cap);
4129}
4130static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4131
4132static ssize_t intel_iommu_show_ecap(struct device *dev,
4133 struct device_attribute *attr,
4134 char *buf)
4135{
4136 struct intel_iommu *iommu = dev_get_drvdata(dev);
4137 return sprintf(buf, "%llx\n", iommu->ecap);
4138}
4139static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4140
4141static struct attribute *intel_iommu_attrs[] = {
4142 &dev_attr_version.attr,
4143 &dev_attr_address.attr,
4144 &dev_attr_cap.attr,
4145 &dev_attr_ecap.attr,
4146 NULL,
4147};
4148
4149static struct attribute_group intel_iommu_group = {
4150 .name = "intel-iommu",
4151 .attrs = intel_iommu_attrs,
4152};
4153
4154const struct attribute_group *intel_iommu_groups[] = {
4155 &intel_iommu_group,
4156 NULL,
4157};
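
/*
 * Illustrative note (not part of the original file): together with the
 * iommu_device_create() call in intel_iommu_init(), which passes these
 * groups and iommu->name, each remapping unit is expected to show up in
 * sysfs roughly as
 *
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/address
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/ecap
 *
 * ("dmar0" is only an example name), so the raw capability registers can be
 * inspected from userspace without touching the hardware directly.
 */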
4158
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004159int __init intel_iommu_init(void)
4160{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004161 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004162 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004163 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004164
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004165 /* VT-d is required for a TXT/tboot launch, so enforce that */
4166 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004167
Jiang Liu3a5670e2014-02-19 14:07:33 +08004168 if (iommu_init_mempool()) {
4169 if (force_on)
4170 panic("tboot: Failed to initialize iommu memory\n");
4171 return -ENOMEM;
4172 }
4173
4174 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004175 if (dmar_table_init()) {
4176 if (force_on)
4177 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004178 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004179 }
4180
Takao Indoh3a93c842013-04-23 17:35:03 +09004181 /*
4182 * Disable translation if already enabled prior to OS handover.
4183 */
Jiang Liu7c919772014-01-06 14:18:18 +08004184 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004185 if (iommu->gcmd & DMA_GCMD_TE)
4186 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004187
Suresh Siddhac2c72862011-08-23 17:05:19 -07004188 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004189 if (force_on)
4190 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004191 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004192 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004193
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004194 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004195 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004196
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004197 if (list_empty(&dmar_rmrr_units))
4198 printk(KERN_INFO "DMAR: No RMRR found\n");
4199
4200 if (list_empty(&dmar_atsr_units))
4201 printk(KERN_INFO "DMAR: No ATSR found\n");
4202
Joseph Cihula51a63e62011-03-21 11:04:24 -07004203 if (dmar_init_reserved_ranges()) {
4204 if (force_on)
4205 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004206 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004207 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004208
4209 init_no_remapping_devices();
4210
Joseph Cihulab7792602011-05-03 00:08:37 -07004211 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004212 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004213 if (force_on)
4214 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004215 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004216 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004217 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004218 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004219 printk(KERN_INFO
4220 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4221
mark gross5e0d2a62008-03-04 15:22:08 -08004222 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004223#ifdef CONFIG_SWIOTLB
4224 swiotlb = 0;
4225#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004226 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004227
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004228 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004229
Alex Williamsona5459cf2014-06-12 16:12:31 -06004230 for_each_active_iommu(iommu, drhd)
4231 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4232 intel_iommu_groups,
4233 iommu->name);
4234
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004235 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004236 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004237 if (si_domain && !hw_pass_through)
4238 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004239
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004240 intel_iommu_enabled = 1;
4241
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004242 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004243
4244out_free_reserved_range:
4245 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004246out_free_dmar:
4247 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004248 up_write(&dmar_global_lock);
4249 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004250 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004251}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004252
Alex Williamson579305f2014-07-03 09:51:43 -06004253static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4254{
4255 struct intel_iommu *iommu = opaque;
4256
4257 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4258 return 0;
4259}
4260
4261/*
4262 * NB - intel-iommu lacks any sort of reference counting for the users of
4263 * dependent devices. If multiple endpoints have intersecting dependent
4264 * devices, unbinding the driver from any one of them will possibly leave
4265 * the others unable to operate.
4266 */
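/*
 * Example of the hazard (illustrative, not from the original source): two
 * endpoints behind the same PCIe-to-PCI bridge share the bridge's requester
 * ID as a DMA alias.  Detaching one of them walks that alias through
 * pci_for_each_dma_alias() and clears the shared context entry, so DMA from
 * the sibling device that is still bound to its driver may start to fault.
 */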
Han, Weidong3199aa62009-02-26 17:31:12 +08004267static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004268 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004269{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004270 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004271 return;
4272
Alex Williamson579305f2014-07-03 09:51:43 -06004273 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004274}
4275
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004276static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004277 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004278{
Yijing Wangbca2b912013-10-31 17:26:04 +08004279 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004280 struct intel_iommu *iommu;
4281 unsigned long flags;
4282 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004283 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004284
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004285 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004286 if (!iommu)
4287 return;
4288
4289 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004290 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004291 if (info->iommu == iommu && info->bus == bus &&
4292 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004293 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004294 spin_unlock_irqrestore(&device_domain_lock, flags);
4295
Yu Zhao93a23a72009-05-18 13:51:37 +08004296 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004297 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004298 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004299 free_devinfo_mem(info);
4300
4301 spin_lock_irqsave(&device_domain_lock, flags);
4302
4303 if (found)
4304 break;
4305 else
4306 continue;
4307 }
4308
4309		/* If there are no other devices under the same iommu
4310		 * owned by this domain, clear this iommu in iommu_bmp and
4311		 * update the iommu count and coherency.
4312 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004313 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004314 found = 1;
4315 }
4316
Roland Dreier3e7abe22011-07-20 06:22:21 -07004317 spin_unlock_irqrestore(&device_domain_lock, flags);
4318
Weidong Hanc7151a82008-12-08 22:51:37 +08004319 if (found == 0) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004320 domain_detach_iommu(domain, iommu);
4321 if (!domain_type_is_vm_or_si(domain))
4322 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004323 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004324}
4325
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004326static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004327{
4328 int adjust_width;
4329
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004330 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4331 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004332 domain_reserve_special_ranges(domain);
4333
4334 /* calculate AGAW */
4335 domain->gaw = guest_width;
4336 adjust_width = guestwidth_to_adjustwidth(guest_width);
4337 domain->agaw = width_to_agaw(adjust_width);
4338
Weidong Han5e98c4b2008-12-08 23:03:27 +08004339 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004340 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004341 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004342 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004343
4344 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004345 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004346 if (!domain->pgd)
4347 return -ENOMEM;
4348 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4349 return 0;
4350}
4351
Joerg Roedel00a77de2015-03-26 13:43:08 +01004352static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004353{
Joerg Roedel5d450802008-12-03 14:52:32 +01004354 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004355 struct iommu_domain *domain;
4356
4357 if (type != IOMMU_DOMAIN_UNMANAGED)
4358 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004359
Jiang Liuab8dfe22014-07-11 14:19:27 +08004360 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004361 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004362 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004363			"intel_iommu_domain_alloc: failed to allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004364 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004365 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004366 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004367 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004368			"intel_iommu_domain_alloc: domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004369 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004370 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004371 }
Allen Kay8140a952011-10-14 12:32:17 -07004372 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004373
Joerg Roedel00a77de2015-03-26 13:43:08 +01004374 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004375 domain->geometry.aperture_start = 0;
4376 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4377 domain->geometry.force_aperture = true;
4378
Joerg Roedel00a77de2015-03-26 13:43:08 +01004379 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004380}
Kay, Allen M38717942008-09-09 18:37:29 +03004381
Joerg Roedel00a77de2015-03-26 13:43:08 +01004382static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004383{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004384 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004385}
Kay, Allen M38717942008-09-09 18:37:29 +03004386
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004387static int intel_iommu_attach_device(struct iommu_domain *domain,
4388 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004389{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004390 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004391 struct intel_iommu *iommu;
4392 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004393 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004394
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004395 if (device_is_rmrr_locked(dev)) {
4396 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4397 return -EPERM;
4398 }
4399
David Woodhouse7207d8f2014-03-09 16:31:06 -07004400 /* normally dev is not mapped */
4401 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004402 struct dmar_domain *old_domain;
4403
David Woodhouse1525a292014-03-06 16:19:30 +00004404 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004405 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004406 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004407 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004408 else
4409 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004410
4411 if (!domain_type_is_vm_or_si(old_domain) &&
4412 list_empty(&old_domain->devices))
4413 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004414 }
4415 }
4416
David Woodhouse156baca2014-03-09 14:00:57 -07004417 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004418 if (!iommu)
4419 return -ENODEV;
4420
4421 /* check if this iommu agaw is sufficient for max mapped address */
4422 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004423 if (addr_width > cap_mgaw(iommu->cap))
4424 addr_width = cap_mgaw(iommu->cap);
4425
4426 if (dmar_domain->max_addr > (1LL << addr_width)) {
4427 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004428 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004429 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004430 return -EFAULT;
4431 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004432 dmar_domain->gaw = addr_width;
4433
4434 /*
4435 * Knock out extra levels of page tables if necessary
4436 */
4437 while (iommu->agaw < dmar_domain->agaw) {
4438 struct dma_pte *pte;
4439
4440 pte = dmar_domain->pgd;
4441 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004442 dmar_domain->pgd = (struct dma_pte *)
4443 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004444 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004445 }
4446 dmar_domain->agaw--;
4447 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004448
David Woodhouse5913c9b2014-03-09 16:27:31 -07004449 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004450}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004451
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004452static void intel_iommu_detach_device(struct iommu_domain *domain,
4453 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004454{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004455 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004456}
Kay, Allen M38717942008-09-09 18:37:29 +03004457
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004458static int intel_iommu_map(struct iommu_domain *domain,
4459 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004460 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004461{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004462 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004463 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004464 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004465 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004466
Joerg Roedeldde57a22008-12-03 15:04:09 +01004467 if (iommu_prot & IOMMU_READ)
4468 prot |= DMA_PTE_READ;
4469 if (iommu_prot & IOMMU_WRITE)
4470 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004471 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4472 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004473
David Woodhouse163cc522009-06-28 00:51:17 +01004474 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004475 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004476 u64 end;
4477
4478 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004479 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004480 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004481 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004482 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004483 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004484 return -EFAULT;
4485 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004486 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004487 }
David Woodhousead051222009-06-28 14:22:28 +01004488 /* Round up size to next multiple of PAGE_SIZE, if it and
4489 the low bits of hpa would take us onto the next page */
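	/*
	 * Worked example (illustrative, not from the original source): with
	 * 4KiB VT-d pages, hpa = 0x1ff0 and size = 0x20 straddle a page
	 * boundary, so aligned_nrpages() returns 2 even though size itself
	 * is smaller than VTD_PAGE_SIZE.
	 */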
David Woodhouse88cb6a72009-06-28 15:03:06 +01004490 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004491 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4492 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004493 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004494}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004495
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004496static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004497 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004498{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004499 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004500 struct page *freelist = NULL;
4501 struct intel_iommu *iommu;
4502 unsigned long start_pfn, last_pfn;
4503 unsigned int npages;
4504 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004505
David Woodhouse5cf0a762014-03-19 16:07:49 +00004506 /* Cope with horrid API which requires us to unmap more than the
4507 size argument if it happens to be a large-page mapping. */
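	/*
	 * Illustrative example (not from the original source): if the IOVA
	 * was mapped with a 2MiB superpage, pfn_to_dma_pte() reports level 2
	 * and size is bumped to 2MiB below, so the whole large page is torn
	 * down even when the caller asked to unmap only 4KiB.
	 */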
4508 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4509 BUG();
4510
4511 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4512 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4513
David Woodhouseea8ea462014-03-05 17:09:32 +00004514 start_pfn = iova >> VTD_PAGE_SHIFT;
4515 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4516
4517 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4518
4519 npages = last_pfn - start_pfn + 1;
4520
4521 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4522 iommu = g_iommus[iommu_id];
4523
4524 /*
4525 * find bit position of dmar_domain
4526 */
4527 ndomains = cap_ndoms(iommu->cap);
4528 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4529 if (iommu->domains[num] == dmar_domain)
4530 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4531 npages, !freelist, 0);
4532 }
4533
4534 }
4535
4536 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004537
David Woodhouse163cc522009-06-28 00:51:17 +01004538 if (dmar_domain->max_addr == iova + size)
4539 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004540
David Woodhouse5cf0a762014-03-19 16:07:49 +00004541 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004542}
Kay, Allen M38717942008-09-09 18:37:29 +03004543
Joerg Roedeld14d6572008-12-03 15:06:57 +01004544static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304545 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004546{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004547 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004548 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004549 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004550 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004551
David Woodhouse5cf0a762014-03-19 16:07:49 +00004552 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004553 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004554 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004555
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004556 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004557}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004558
Joerg Roedel5d587b82014-09-05 10:50:45 +02004559static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004560{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004561 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004562 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004563 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004564 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004565
Joerg Roedel5d587b82014-09-05 10:50:45 +02004566 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004567}
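
/*
 * Illustrative note (not part of the original file): callers reach this
 * through the generic helper, e.g. device-assignment code checking
 * iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP) before handing a
 * device to a guest, which ends up here via intel_iommu_ops.capable.
 */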
4568
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004569static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004570{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004571 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004572 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004573 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004574
Alex Williamsona5459cf2014-06-12 16:12:31 -06004575 iommu = device_to_iommu(dev, &bus, &devfn);
4576 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004577 return -ENODEV;
4578
Alex Williamsona5459cf2014-06-12 16:12:31 -06004579 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004580
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004581 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004582
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004583 if (IS_ERR(group))
4584 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004585
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004586 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004587 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004588}
4589
4590static void intel_iommu_remove_device(struct device *dev)
4591{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004592 struct intel_iommu *iommu;
4593 u8 bus, devfn;
4594
4595 iommu = device_to_iommu(dev, &bus, &devfn);
4596 if (!iommu)
4597 return;
4598
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004599 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004600
4601 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004602}
4603
Thierry Redingb22f6432014-06-27 09:03:12 +02004604static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004605 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004606 .domain_alloc = intel_iommu_domain_alloc,
4607 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004608 .attach_dev = intel_iommu_attach_device,
4609 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004610 .map = intel_iommu_map,
4611 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004612 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004613 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004614 .add_device = intel_iommu_add_device,
4615 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004616 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004617};
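
/*
 * Illustrative sketch (not part of the original file): consumers such as
 * VFIO reach the callbacks above only through the generic IOMMU API that
 * intel_iommu_init() wired up with bus_set_iommu().  Under that assumption,
 * a typical caller flow looks roughly like:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, &pdev->dev);
 *	if (!ret)
 *		ret = iommu_map(domain, iova, phys, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, &pdev->dev);
 *	iommu_domain_free(domain);
 *
 * which lands in intel_iommu_domain_alloc(), intel_iommu_attach_device(),
 * intel_iommu_map() and friends above.
 */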
David Woodhouse9af88142009-02-13 23:18:03 +00004618
Daniel Vetter94526182013-01-20 23:50:13 +01004619static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4620{
4621 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4622 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4623 dmar_map_gfx = 0;
4624}
4625
4626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4628DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4629DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4630DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4631DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4632DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4633
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004634static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004635{
4636 /*
4637 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004638 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004639 */
4640 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4641 rwbf_quirk = 1;
4642}
4643
4644DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004645DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4646DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4647DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4648DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4649DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4650DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004651
Adam Jacksoneecfd572010-08-25 21:17:34 +01004652#define GGC 0x52
4653#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4654#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4655#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4656#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4657#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4658#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4659#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4660#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
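
/*
 * Worked example (illustrative, not from the original source): a GGC read
 * of 0x0930 has bits 11:8 equal to 0x9 (GGC_MEMORY_SIZE_2M_VT), so
 * ggc & GGC_MEMORY_VT_ENABLED is non-zero and the quirk below leaves
 * dmar_map_gfx alone; a value such as 0x0130 (GGC_MEMORY_SIZE_1M, VT bit
 * clear) would disable the IOMMU for graphics instead.
 */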
4661
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004662static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004663{
4664 unsigned short ggc;
4665
Adam Jacksoneecfd572010-08-25 21:17:34 +01004666 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004667 return;
4668
Adam Jacksoneecfd572010-08-25 21:17:34 +01004669 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004670 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4671 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004672 } else if (dmar_map_gfx) {
4673 /* we have to ensure the gfx device is idle before we flush */
4674 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4675 intel_iommu_strict = 1;
4676 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004677}
4678DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4679DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4680DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4681DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4682
David Woodhousee0fc7e02009-09-30 09:12:17 -07004683/* On Tylersburg chipsets, some BIOSes have been known to enable the
4684 ISOCH DMAR unit for the Azalia sound device, but not give it any
4685 TLB entries, which causes it to deadlock. Check for that. We do
4686 this in a function called from init_dmars(), instead of in a PCI
4687 quirk, because we don't want to print the obnoxious "BIOS broken"
4688 message if VT-d is actually disabled.
4689*/
4690static void __init check_tylersburg_isoch(void)
4691{
4692 struct pci_dev *pdev;
4693 uint32_t vtisochctrl;
4694
4695 /* If there's no Azalia in the system anyway, forget it. */
4696 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4697 if (!pdev)
4698 return;
4699 pci_dev_put(pdev);
4700
4701 /* System Management Registers. Might be hidden, in which case
4702 we can't do the sanity check. But that's OK, because the
4703 known-broken BIOSes _don't_ actually hide it, so far. */
4704 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4705 if (!pdev)
4706 return;
4707
4708 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4709 pci_dev_put(pdev);
4710 return;
4711 }
4712
4713 pci_dev_put(pdev);
4714
4715 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4716 if (vtisochctrl & 1)
4717 return;
4718
4719 /* Drop all bits other than the number of TLB entries */
4720 vtisochctrl &= 0x1c;
4721
4722 /* If we have the recommended number of TLB entries (16), fine. */
4723 if (vtisochctrl == 0x10)
4724 return;
4725
4726 /* Zero TLB entries? You get to ride the short bus to school. */
4727 if (!vtisochctrl) {
4728 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4729 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4730 dmi_get_system_info(DMI_BIOS_VENDOR),
4731 dmi_get_system_info(DMI_BIOS_VERSION),
4732 dmi_get_system_info(DMI_PRODUCT_VERSION));
4733 iommu_identity_mapping |= IDENTMAP_AZALIA;
4734 return;
4735 }
4736
4737	printk(KERN_WARNING "DMAR: Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4738 vtisochctrl);
4739}