/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

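/*
 * Illustrative arithmetic (a sketch, assuming VTD_PAGE_SHIFT == 12): for
 * the default guest address width of 48 bits,
 *
 *   __DOMAIN_MAX_PFN(48)  == (1ULL << 36) - 1 == 0xfffffffff
 *   __DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1 == 0xffffffffffff
 *
 * On a 64-bit kernel DOMAIN_MAX_PFN(48) equals __DOMAIN_MAX_PFN(48); on a
 * 32-bit kernel it is clamped to ULONG_MAX so PFNs still fit in an
 * unsigned long.
 */
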
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was an order of a 4KiB page and that the
 * mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

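/*
 * A minimal sketch (not built) of how the core interprets this bitmap:
 * bit n set means page size (1UL << n) is supported, so ~0xFFFUL
 * advertises every power-of-two size from 4KiB (bit 12) upwards.
 */
#if 0
static bool example_pgsize_supported(unsigned long size)
{
	return is_power_of_2(size) && (INTEL_IOMMU_PGSIZES & size);
}
#endif
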
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

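/*
 * Worked example (a sketch): for the default 48-bit guest address width,
 * width_to_agaw(48) == DIV_ROUND_UP(18, 9) == 2, and agaw_to_level(2) == 4,
 * i.e. a four-level page table. At level 2, level_to_offset_bits() == 9,
 * so pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff, and level_size(2) ==
 * 512 pages == 2MiB of IOVA per level-2 entry.
 */
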
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

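/*
 * Worked example (a sketch): on x86 both PAGE_SHIFT and VTD_PAGE_SHIFT
 * are 12, so the two conversions above are identities. On a kernel built
 * with 64KiB MM pages (PAGE_SHIFT == 16) each MM page spans 16 VT-d
 * pages, so mm_to_dma_pfn(n) == n << 4 and dma_to_mm_pfn(n) == n >> 4.
 */
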
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

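/*
 * A minimal sketch (not built) of the two-stage lookup these helpers
 * implement: the 256-entry root table is indexed by PCI bus number, and
 * each present root entry points to a 256-entry context table indexed
 * by devfn.
 */
#if 0
static struct context_entry *example_lookup(struct intel_iommu *iommu,
					    u8 bus, u8 devfn)
{
	struct root_entry *root = &iommu->root_entry[bus];

	if (!root_present(root))
		return NULL;
	return get_context_addr_from_root(root) + devfn;
}
#endif
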
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

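/*
 * A minimal sketch (not built) of how these setters are typically
 * composed when wiring a device to a domain's page tables; the function
 * name and arguments are hypothetical, but the ordering mirrors what the
 * real context-mapping path does (present bit last, after everything
 * else is filled in).
 */
#if 0
static void example_fill_context(struct context_entry *context,
				 struct dmar_domain *domain,
				 unsigned long pgd_phys)
{
	context_clear_entry(context);
	context_set_domain_id(context, domain->id);
	context_set_address_width(context, domain->agaw);
	context_set_address_root(context, pgd_phys);
	context_set_translation_type(context, 0); /* multi-level translation */
	context_set_fault_enable(context);
	context_set_present(context);
}
#endif
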
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

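/*
 * Worked example (a sketch): a present read/write leaf entry for host
 * page frame P is composed as
 *
 *   pte->val = ((u64)P << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 *
 * dma_pte_present() then sees the R/W bits (bits 0-1) set, and
 * dma_pte_addr() masks them back off to recover the host address.
 */
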
/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

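/*
 * Usage example (illustrative): the options above combine as a
 * comma-separated kernel boot parameter, e.g.
 *
 *   intel_iommu=on,strict
 *
 * which force-enables the IOMMU and disables batched IOTLB flushing.
 */
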
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a lower supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

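/*
 * Worked example (a sketch): if cap_sagaw() reads 0x4 (only bit 2 set,
 * i.e. only 4-level tables supported), iommu_calculate_agaw() starts at
 * width_to_agaw(48) == 2, finds bit 2 set, and returns agaw 2. If the
 * hardware instead only set bit 1 (3-level, 39-bit), the loop would walk
 * down from 2 and settle on agaw 1.
 */
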
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

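/*
 * Worked example (a sketch): with one iommu reporting superpage
 * capabilities 0x3 (2MiB and 1GiB) and another reporting 0x1 (2MiB
 * only), the loop above leaves mask == 0x1 and fls(mask) == 1, so the
 * domain is limited to 2MiB superpages. If any iommu reports 0, mask
 * drops to 0 and fls(0) == 0: no superpages at all.
 */
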
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

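/*
 * A minimal sketch (not built) of typical device_to_iommu() use; the
 * function name is hypothetical. Note the returned bus/devfn is the
 * (bus, devfn) the IOMMU sees for the device's DMA, which can differ
 * from the device's own IDs behind a PCIe-to-PCI bridge.
 */
#if 0
static int example_lookup_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;
	/* bus/devfn now identify dev within this iommu's scope */
	return 0;
}
#endif
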
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

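/*
 * A minimal sketch (not built) of how callers drive pfn_to_dma_pte();
 * the wrapper name is hypothetical. Passing *target_level == 0 asks for
 * the deepest entry that already exists (allocating nothing and stopping
 * at superpages), while a nonzero level builds the table down to that
 * level, allocating intermediate pages as needed.
 */
#if 0
static struct dma_pte *example_get_leaf(struct dmar_domain *domain,
					unsigned long iov_pfn)
{
	int level = 1;	/* request a 4KiB leaf entry */

	return pfn_to_dma_pte(domain, iov_pfn, &level);
}
#endif
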
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

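/*
 * A minimal sketch (not built) of the unmap protocol described above;
 * the function name and did/pfn arguments are hypothetical. Page-table
 * pages are only handed back to the allocator after the IOTLB flush, so
 * a concurrent hardware page-walk can never touch freed memory.
 */
#if 0
static void example_unmap(struct dmar_domain *domain,
			  struct intel_iommu *iommu, u16 did,
			  unsigned long start_pfn, unsigned long last_pfn)
{
	struct page *freelist;

	freelist = domain_unmap(domain, start_pfn, last_pfn);
	iommu_flush_iotlb_psi(iommu, did, start_pfn,
			      last_pfn - start_pfn + 1, 0, 0);
	dma_free_pagelist(freelist);
}
#endif
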
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra secure. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

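/*
 * Illustrative calls (a sketch): the three granularities above map onto
 * callers roughly as
 *
 *   __iommu_flush_iotlb(iommu, 0,   0,    0,    DMA_TLB_GLOBAL_FLUSH);
 *   __iommu_flush_iotlb(iommu, did, 0,    0,    DMA_TLB_DSI_FLUSH);
 *   __iommu_flush_iotlb(iommu, did, addr, mask, DMA_TLB_PSI_FLUSH);
 *
 * Hardware may legally perform a coarser invalidation than requested,
 * which is what the DMA_TLB_IAIG check at the end reports.
 */
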
David Woodhouse64ae8922014-03-09 12:52:30 -07001266static struct device_domain_info *
1267iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1268 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001269{
Yu Zhao93a23a72009-05-18 13:51:37 +08001270 int found = 0;
1271 unsigned long flags;
1272 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001273 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001274
1275 if (!ecap_dev_iotlb_support(iommu->ecap))
1276 return NULL;
1277
1278 if (!iommu->qi)
1279 return NULL;
1280
1281 spin_lock_irqsave(&device_domain_lock, flags);
1282 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001283 if (info->iommu == iommu && info->bus == bus &&
1284 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001285 found = 1;
1286 break;
1287 }
1288 spin_unlock_irqrestore(&device_domain_lock, flags);
1289
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001290 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001291 return NULL;
1292
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001293 pdev = to_pci_dev(info->dev);
1294
1295 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001296 return NULL;
1297
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001298 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001299 return NULL;
1300
Yu Zhao93a23a72009-05-18 13:51:37 +08001301 return info;
1302}
1303
1304static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1305{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001306 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001307 return;
1308
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001309 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001310}
1311
1312static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1313{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001314 if (!info->dev || !dev_is_pci(info->dev) ||
1315 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001316 return;
1317
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001318 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001319}
1320
1321static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1322 u64 addr, unsigned mask)
1323{
1324 u16 sid, qdep;
1325 unsigned long flags;
1326 struct device_domain_info *info;
1327
1328 spin_lock_irqsave(&device_domain_lock, flags);
1329 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001330 struct pci_dev *pdev;
1331 if (!info->dev || !dev_is_pci(info->dev))
1332 continue;
1333
1334 pdev = to_pci_dev(info->dev);
1335 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001336 continue;
1337
1338 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001339 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001340 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1341 }
1342 spin_unlock_irqrestore(&device_domain_lock, flags);
1343}
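
/*
 * Editor's note: a worked example of the source-id (SID) computation
 * above, assuming a hypothetical device at bus 0x1a, slot 0x02,
 * function 0:
 *
 *	devfn = PCI_DEVFN(0x02, 0) = 0x10
 *	sid   = 0x1a << 8 | 0x10   = 0x1a10
 *
 * That 16-bit SID, together with the invalidation queue depth reported
 * by the device's ATS capability, is what qi_flush_dev_iotlb() puts in
 * the device-IOTLB invalidation descriptor.
 */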

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fall back to a domain-selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to
	 * be naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changing a page from non-present to present
	 * requires a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
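
/*
 * Editor's note: a worked example of the PSI mask arithmetic above,
 * with hypothetical numbers. For pfn = 0x1000 and pages = 9:
 *
 *	mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4
 *	addr = 0x1000 << 12 = 0x1000000
 *
 * so the hardware is asked to invalidate a naturally aligned block of
 * 2^4 = 16 pages covering the 9 requested ones. If the capability
 * register reports a smaller maximum address mask, the code falls back
 * to a domain-selective flush instead.
 */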

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
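
/*
 * Editor's note: a quick sizing example for the allocations above,
 * assuming hardware whose capability register reports 256 domains:
 *
 *	nlongs  = BITS_TO_LONGS(256) = 4 longs (on 64-bit) for domain_ids
 *	domains = 256 * sizeof(struct dmar_domain *) = 2KiB of pointers
 *
 * The domain_ids bitmap tracks which IDs are in use; the domains array
 * maps each allocated ID back to its dmar_domain.
 */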

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
	/* domain ids for virtual machines; these are never set in a context entry */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("IOMMU: no free domain ids\n");

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
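
/*
 * Editor's note: domain_attach_iommu()/domain_detach_iommu() keep a
 * per-domain bitmap and count of the IOMMUs a domain is attached to.
 * A sketch of the lifecycle, with hypothetical units:
 *
 *	domain_attach_iommu(dom, iommu0);   // iommu_count 0 -> 1, nid set
 *	domain_attach_iommu(dom, iommu1);   // iommu_count 1 -> 2
 *	domain_detach_iommu(dom, iommu0);   // returns 1
 *	domain_detach_iommu(dom, iommu1);   // returns 0: last reference,
 *					    // caller may free the domain
 *
 * Detaching an IOMMU that was never attached leaves the count alone and
 * returns INT_MAX, so only a genuine 1 -> 0 transition reads as "free me".
 */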

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
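
/*
 * Editor's note: the reservations above work in page-frame numbers.
 * For the IOAPIC window, for example:
 *
 *	IOVA_PFN(0xfee00000) = 0xfee00000 >> 12 = 0xfee00
 *	IOVA_PFN(0xfeefffff) = 0xfeeff
 *
 * so PFNs 0xfee00-0xfeeff are carved out of every domain's IOVA space
 * (via domain_reserve_special_ranges() below), guaranteeing the
 * allocator never hands out a DMA address that would hit the IOAPIC or
 * a PCI BAR.
 */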

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
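
/*
 * Editor's note: the arithmetic above rounds a guest address width up
 * to the next width a real page-table walk can implement: 12 bits of
 * page offset plus a whole number of 9-bit levels. Two examples:
 *
 *	gaw = 48: r = (48 - 12) % 9 = 0	-> agaw = 48 (4 levels)
 *	gaw = 40: r = (40 - 12) % 9 = 1	-> agaw = 40 + 9 - 1 = 48
 *
 * i.e. a 40-bit guest width still needs a 4-level (48-bit) table.
 */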

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}
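
/*
 * Editor's note: pci_for_each_dma_alias() walks every requester ID a
 * device's DMA can appear under, not just its own BDF. As a
 * hypothetical example, a conventional-PCI device behind a PCIe-to-PCI
 * bridge may have its transactions tagged with the bridge's requester
 * ID, so the callback above is invoked for that alias too; every
 * requester ID the device can show up as ends up with a context entry
 * pointing at the same domain.
 */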

static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
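
/*
 * Editor's note: a worked example for aligned_nrpages(), assuming 4KiB
 * MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12):
 *
 *	host_addr = 0x12345678, size = 0x1000
 *	offset within page = 0x678
 *	PAGE_ALIGN(0x678 + 0x1000) = PAGE_ALIGN(0x1678) = 0x2000
 *	0x2000 >> 12 = 2 VT-d pages
 *
 * i.e. a page-sized buffer that straddles a page boundary needs two
 * IOMMU pages. With 64KiB MM pages the same math still rounds the span
 * up to a whole MM page before converting to 4KiB VT-d pages.
 */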

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
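
/*
 * Editor's note: a worked example of the loop above, with hypothetical
 * inputs. VTD_STRIDE_SHIFT is 9, so each level covers 512x more. For
 * iov_pfn = 0x40000, phy_pfn = 0x80000, pages = 1024 and superpage
 * support of 1 level (2MiB):
 *
 *	pfnmerge = 0x40000 | 0x80000 = 0xc0000, low 9 bits clear
 *	pages >>= 9 -> 2 (at least one full 2MiB chunk remains)
 *	level -> 2, support -> 0, loop ends
 *
 * so the mapping can start with 2MiB pages. If either PFN had any of
 * its low 9 bits set, or fewer than 512 pages remained, level would
 * stay 1 and ordinary 4KiB PTEs would be used.
 */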

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for the superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
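
/*
 * Editor's note: a sketch of the batching in the loop above, assuming
 * 4KiB pages and 512 PTEs per page-table page. Writing a contiguous,
 * suitably aligned 4MiB mapping with no superpage support touches
 * 1024 PTEs:
 *
 *	PTEs 0..511    written; first_pte_in_page(pte) becomes true
 *	               -> one domain_flush_cache() for that whole table
 *	PTEs 512..1023 written -> second flush when nr_pages hits 0
 *
 * so the (potentially uncached) page-table memory is flushed twice
 * rather than once per PTE.
 */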

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the
 * device_domain_info for the device.
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize a new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register the PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * The RMRR range might overlap with a physical memory range,
	 * so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
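
/*
 * Editor's note: a worked example of the PFN conversion above. For a
 * hypothetical RMRR of start = 0xed000000, end = 0xed03ffff:
 *
 *	first_vpfn = 0xed000000 >> 12 = 0xed000
 *	last_vpfn  = 0xed03ffff >> 12 = 0xed03f
 *
 * so 0x40 pages are reserved in the domain's IOVA allocator and then
 * mapped 1:1 (IOVA PFN == physical PFN), which is exactly what an
 * identity mapping means.
 */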

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			agaw_to_width(domain->agaw),
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
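
/*
 * Editor's note: for software passthrough (hw == 0), the loop above
 * walks every memblock range and identity-maps it. As a hypothetical
 * example, a node with RAM at 0x0-0x9efff and 0x100000-0x7fffffff gets
 * two iommu_domain_identity_map() calls, one per range, so holes
 * between the ranges stay unmapped. The same si_domain must also end
 * up with the same domain id on every IOMMU, which is what the
 * first/id consistency check in the attach loop enforces.
 */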

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
2540
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002541/*
2542 * There are a couple cases where we need to restrict the functionality of
2543 * devices associated with RMRRs. The first is when evaluating a device for
2544 * identity mapping because problems exist when devices are moved in and out
2545 * of domains and their respective RMRR information is lost. This means that
2546 * a device with associated RMRRs will never be in a "passthrough" domain.
2547 * The second is use of the device through the IOMMU API. This interface
2548 * expects to have full control of the IOVA space for the device. We cannot
2549 * satisfy both the requirement that RMRR access is maintained and have an
2550 * unencumbered IOVA space. We also have no ability to quiesce the device's
2551 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2552 * We therefore prevent devices associated with an RMRR from participating in
2553 * the IOMMU API, which eliminates them from device assignment.
2554 *
2555 * In both cases we assume that PCI USB devices with RMRRs have them largely
2556 * for historical reasons and that the RMRR space is not actively used post
2557 * boot. This exclusion may change if vendors begin to abuse it.
2558 */
2559static bool device_is_rmrr_locked(struct device *dev)
2560{
2561 if (!device_has_rmrr(dev))
2562 return false;
2563
2564 if (dev_is_pci(dev)) {
2565 struct pci_dev *pdev = to_pci_dev(dev);
2566
2567 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2568 return false;
2569 }
2570
2571 return true;
2572}
2573
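/*
 * Decide whether @dev should live in the static 1:1 (identity) domain.
 * RMRR-locked devices never qualify; Azalia and graphics devices follow
 * the IDENTMAP_AZALIA/IDENTMAP_GFX policy bits; everything else needs
 * IDENTMAP_ALL plus a bridge-topology check, and, once boot is done
 * (@startup == 0), a DMA-mask check as well.
 */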
David Woodhouse3bdb2592014-03-09 16:03:08 -07002574static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002575{
David Woodhouse3bdb2592014-03-09 16:03:08 -07002577 if (dev_is_pci(dev)) {
2578 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002579
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002580 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002581 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002582
David Woodhouse3bdb2592014-03-09 16:03:08 -07002583 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2584 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002585
David Woodhouse3bdb2592014-03-09 16:03:08 -07002586 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2587 return 1;
2588
2589 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2590 return 0;
2591
2592 /*
2593 * We want to start off with all devices in the 1:1 domain, and
2594 * take them out later if we find they can't access all of memory.
2595 *
2596 * However, we can't do this for PCI devices behind bridges,
2597 * because all PCI devices behind the same bridge will end up
2598 * with the same source-id on their transactions.
2599 *
2600 * Practically speaking, we can't change things around for these
2601 * devices at run-time, because we can't be sure there'll be no
2602 * DMA transactions in flight for any of their siblings.
2603 *
2604 * So PCI devices (unless they're on the root bus) as well as
2605 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2606 * the 1:1 domain, just in _case_ one of their siblings turns out
2607 * not to be able to map all of memory.
2608 */
2609 if (!pci_is_pcie(pdev)) {
2610 if (!pci_is_root_bus(pdev->bus))
2611 return 0;
2612 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2613 return 0;
2614 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2615 return 0;
2616 } else {
2617 if (device_has_rmrr(dev))
2618 return 0;
2619 }
David Woodhouse6941af22009-07-04 18:24:27 +01002620
David Woodhouse3dfc8132009-07-04 19:11:08 +01002621 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002622 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002623 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002624 * take them out of the 1:1 domain later.
2625 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002626 if (!startup) {
2627 /*
2628 * If the device's dma_mask is less than the system's memory
2629 * size then this is not a candidate for identity mapping.
2630 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002631 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002632
David Woodhouse3bdb2592014-03-09 16:03:08 -07002633 if (dev->coherent_dma_mask &&
2634 dev->coherent_dma_mask < dma_mask)
2635 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002636
David Woodhouse3bdb2592014-03-09 16:03:08 -07002637 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002638 }
David Woodhouse6941af22009-07-04 18:24:27 +01002639
2640 return 1;
2641}
2642
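/*
 * Put one device into the static identity domain, using hardware
 * pass-through context entries when @hw is set. A device without an
 * IOMMU (-ENODEV) is silently skipped.
 */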
David Woodhousecf04eee2014-03-21 16:49:04 +00002643static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2644{
2645 int ret;
2646
2647 if (!iommu_should_identity_map(dev, 1))
2648 return 0;
2649
2650 ret = domain_add_dev_info(si_domain, dev,
2651 hw ? CONTEXT_TT_PASS_THROUGH :
2652 CONTEXT_TT_MULTI_LEVEL);
2653 if (!ret)
2654 pr_info("IOMMU: %s identity mapping for device %s\n",
2655 hw ? "hardware" : "software", dev_name(dev));
2656 else if (ret == -ENODEV)
2657 /* device not associated with an iommu */
2658 ret = 0;
2659
2660 return ret;
2661}
2662
2663
Matt Kraai071e1372009-08-23 22:30:22 -07002664static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002665{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002666 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002667 struct dmar_drhd_unit *drhd;
2668 struct intel_iommu *iommu;
2669 struct device *dev;
2670 int i;
2671 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002672
David Woodhouse19943b02009-08-04 16:19:20 +01002673 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002674 if (ret)
2675 return -EFAULT;
2676
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002678 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2679 if (ret)
2680 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002681 }
2682
David Woodhousecf04eee2014-03-21 16:49:04 +00002683 for_each_active_iommu(iommu, drhd)
2684 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2685 struct acpi_device_physical_node *pn;
2686 struct acpi_device *adev;
2687
2688 if (dev->bus != &acpi_bus_type)
2689 continue;
2690
 2691			adev = to_acpi_device(dev);
2692 mutex_lock(&adev->physical_node_lock);
2693 list_for_each_entry(pn, &adev->physical_node_list, node) {
2694 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2695 if (ret)
2696 break;
2697 }
2698 mutex_unlock(&adev->physical_node_lock);
2699 if (ret)
2700 return ret;
2701 }
2702
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002703 return 0;
2704}
2705
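/*
 * Choose an invalidation backend for @iommu: prefer queued invalidation
 * (QI) and fall back to register-based invalidation when QI cannot be
 * enabled.
 */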
Jiang Liuffebeb42014-11-09 22:48:02 +08002706static void intel_iommu_init_qi(struct intel_iommu *iommu)
2707{
2708 /*
 2709	 * Start from a sane IOMMU hardware state.
 2710	 * If queued invalidation was already initialized by us (for
 2711	 * example, while enabling interrupt remapping), then things
 2712	 * are already rolling from a sane state.
2713 */
2714 if (!iommu->qi) {
2715 /*
2716 * Clear any previous faults.
2717 */
2718 dmar_fault(-1, iommu);
2719 /*
2720 * Disable queued invalidation if supported and already enabled
2721 * before OS handover.
2722 */
2723 dmar_disable_qi(iommu);
2724 }
2725
2726 if (dmar_enable_qi(iommu)) {
2727 /*
 2728		 * Queued invalidation could not be enabled; use register-based invalidation
2729 */
2730 iommu->flush.flush_context = __iommu_flush_context;
2731 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2732 pr_info("IOMMU: %s using Register based invalidation\n",
2733 iommu->name);
2734 } else {
2735 iommu->flush.flush_context = qi_flush_context;
2736 iommu->flush.flush_iotlb = qi_flush_iotlb;
2737 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2738 }
2739}
2740
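/*
 * Boot-time bring-up of all DMAR units: allocate per-IOMMU state and
 * root entries, set up identity mappings and RMRRs, then enable fault
 * reporting and translation on each unit.
 */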
Joseph Cihulab7792602011-05-03 00:08:37 -07002741static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002742{
2743 struct dmar_drhd_unit *drhd;
2744 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002745 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002746 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002747 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002748
2749 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002750 * for each drhd
2751 * allocate root
2752 * initialize and program root entry to not present
2753 * endfor
2754 */
2755 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002756 /*
 2757		 * Lock not needed as this is only incremented in the single-
 2758		 * threaded kernel __init code path; all other accesses are
 2759		 * read-only.
2760 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002761 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002762 g_num_of_iommus++;
2763 continue;
2764 }
2765 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002766 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002767 }
2768
Jiang Liuffebeb42014-11-09 22:48:02 +08002769 /* Preallocate enough resources for IOMMU hot-addition */
2770 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2771 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2772
Weidong Hand9630fe2008-12-08 11:06:32 +08002773 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2774 GFP_KERNEL);
2775 if (!g_iommus) {
2776 printk(KERN_ERR "Allocating global iommu array failed\n");
2777 ret = -ENOMEM;
2778 goto error;
2779 }
2780
mark gross80b20dd2008-04-18 13:53:58 -07002781 deferred_flush = kzalloc(g_num_of_iommus *
2782 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2783 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002784 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002785 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002786 }
2787
Jiang Liu7c919772014-01-06 14:18:18 +08002788 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002789 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002790
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002791 ret = iommu_init_domains(iommu);
2792 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002793 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002794
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002795 /*
2796 * TBD:
2797 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002798		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002799 */
2800 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002801 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002802 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002803 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002804 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002805 }
2806
Jiang Liuffebeb42014-11-09 22:48:02 +08002807 for_each_active_iommu(iommu, drhd)
2808 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002809
David Woodhouse19943b02009-08-04 16:19:20 +01002810 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002811 iommu_identity_mapping |= IDENTMAP_ALL;
2812
Suresh Siddhad3f13812011-08-23 17:05:25 -07002813#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002814 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002815#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002816
2817 check_tylersburg_isoch();
2818
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002819 /*
2820 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002821 * identity mappings for rmrr, gfx, and isa and may fall back to static
2822 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002823 */
David Woodhouse19943b02009-08-04 16:19:20 +01002824 if (iommu_identity_mapping) {
2825 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2826 if (ret) {
2827 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002828 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002829 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002830 }
David Woodhouse19943b02009-08-04 16:19:20 +01002831 /*
2832 * For each rmrr
2833 * for each dev attached to rmrr
2834 * do
2835 * locate drhd for dev, alloc domain for dev
2836 * allocate free domain
2837 * allocate page table entries for rmrr
2838 * if context not allocated for bus
2839 * allocate and init context
2840 * set present in root table for this bus
2841 * init context with domain, translation etc
2842 * endfor
2843 * endfor
2844 */
2845 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2846 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002847		/* some BIOSes list nonexistent devices in the DMAR table */
2848 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002849 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002850 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002851 if (ret)
2852 printk(KERN_ERR
2853 "IOMMU: mapping reserved region failed\n");
2854 }
2855 }
2856
2857 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002858
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002859 /*
2860 * for each drhd
2861 * enable fault log
2862 * global invalidate context cache
2863 * global invalidate iotlb
2864 * enable translation
2865 */
Jiang Liu7c919772014-01-06 14:18:18 +08002866 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002867 if (drhd->ignored) {
2868 /*
2869 * we always have to disable PMRs or DMA may fail on
2870 * this device
2871 */
2872 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002873 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002874 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002875 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002876
2877 iommu_flush_write_buffer(iommu);
2878
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002879 ret = dmar_set_interrupt(iommu);
2880 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002881 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002882
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002883 iommu_set_root_entry(iommu);
2884
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002885 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002886 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002887 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002888 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002889 }
2890
2891 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002892
2893free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002894 for_each_active_iommu(iommu, drhd) {
2895 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002896 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002897 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002898 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002899free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002900 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002901error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002902 return ret;
2903}
2904
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002905/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002906static struct iova *intel_alloc_iova(struct device *dev,
2907 struct dmar_domain *domain,
2908 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002909{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002910 struct iova *iova = NULL;
2911
David Woodhouse875764d2009-06-28 21:20:51 +01002912 /* Restrict dma_mask to the width that the iommu can handle */
2913 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2914
2915 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002916 /*
 2917		 * First try to allocate an I/O virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002918		 * DMA_BIT_MASK(32), and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002919		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002920 */
David Woodhouse875764d2009-06-28 21:20:51 +01002921 iova = alloc_iova(&domain->iovad, nrpages,
2922 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2923 if (iova)
2924 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002925 }
David Woodhouse875764d2009-06-28 21:20:51 +01002926 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2927 if (unlikely(!iova)) {
 2928		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002929 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002930 return NULL;
2931 }
2932
2933 return iova;
2934}
2935
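/*
 * Slow path of get_valid_domain_for_dev(): find or allocate a domain
 * for @dev and make sure its context entry is programmed. Returns NULL
 * on failure.
 */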
David Woodhoused4b709f2014-03-09 16:07:40 -07002936static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002937{
2938 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002939 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940
David Woodhoused4b709f2014-03-09 16:07:40 -07002941 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002942 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002943		printk(KERN_ERR "Allocating domain for %s failed\n",
2944 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002945 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002946 }
2947
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002949 if (unlikely(!domain_context_mapped(dev))) {
2950 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002951 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002952			printk(KERN_ERR "Domain context map for %s failed\n",
2953 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002954 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002955 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956 }
2957
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002958 return domain;
2959}
2960
David Woodhoused4b709f2014-03-09 16:07:40 -07002961static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002962{
2963 struct device_domain_info *info;
2964
2965 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002966 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002967 if (likely(info))
2968 return info->domain;
2969
2970 return __get_valid_domain_for_dev(dev);
2971}
2972
David Woodhouse3d891942014-03-06 15:59:26 +00002973static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002974{
David Woodhouse3d891942014-03-06 15:59:26 +00002975 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002976}
2977
David Woodhouseecb509e2014-03-09 16:29:55 -07002978/* Check if the dev needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002979static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002980{
2981 int found;
2982
David Woodhouse3d891942014-03-06 15:59:26 +00002983 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002984 return 1;
2985
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002986 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002987 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988
David Woodhouse9b226622014-03-09 14:03:28 -07002989 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002990 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002991 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002992 return 1;
2993 else {
2994 /*
 2995			 * The 32 bit DMA device is removed from si_domain and
 2996			 * falls back to non-identity mapping.
2997 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002998 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002999 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003000 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003001 return 0;
3002 }
3003 } else {
3004 /*
 3005		 * A 64 bit DMA device detached from a VM is put back into
 3006		 * si_domain for identity mapping.
3007 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003008 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003009 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003010 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003011 hw_pass_through ?
3012 CONTEXT_TT_PASS_THROUGH :
3013 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003014 if (!ret) {
3015 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003016 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003017 return 1;
3018 }
3019 }
3020 }
3021
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003022 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003023}
3024
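/*
 * Core of intel_map_page() and intel_alloc_coherent(): allocate an IOVA
 * range below @dma_mask, map it to @paddr with read/write permissions
 * derived from @dir, and flush the IOTLB only when caching mode
 * requires it.
 */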
David Woodhouse5040a912014-03-09 16:14:00 -07003025static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003026 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003027{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003028 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003029 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003030 struct iova *iova;
3031 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003032 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003033 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003034 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003035
3036 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003037
David Woodhouse5040a912014-03-09 16:14:00 -07003038 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003039 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003040
David Woodhouse5040a912014-03-09 16:14:00 -07003041 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003042 if (!domain)
3043 return 0;
3044
Weidong Han8c11e792008-12-08 15:29:22 +08003045 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003046 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003047
David Woodhouse5040a912014-03-09 16:14:00 -07003048 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049 if (!iova)
3050 goto error;
3051
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003052 /*
3053 * Check if DMAR supports zero-length reads on write only
 3054	 * mappings.
3055 */
 3056	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003057 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003058 prot |= DMA_PTE_READ;
3059 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3060 prot |= DMA_PTE_WRITE;
3061 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003062	 * paddr to (paddr + size) might span a partial page; we should map
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003063	 * the whole page. Note: if two parts of one page are mapped
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003064	 * separately, we might have two guest_addrs mapping to the same host
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003065	 * paddr, but this is not a big problem
3066 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003067 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003068 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069 if (ret)
3070 goto error;
3071
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003072 /* it's a non-present to present mapping. Only flush if caching mode */
3073 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003074 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003075 else
Weidong Han8c11e792008-12-08 15:29:22 +08003076 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077
David Woodhouse03d6a242009-06-28 15:33:46 +01003078 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3079 start_paddr += paddr & ~PAGE_MASK;
3080 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003081
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003082error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003083 if (iova)
3084 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003085	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003086 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003087 return 0;
3088}
3089
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003090static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3091 unsigned long offset, size_t size,
3092 enum dma_data_direction dir,
3093 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003094{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003095 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003096 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003097}
3098
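/*
 * Drain every per-IOMMU deferred-unmap queue: flush the IOTLB (a global
 * flush normally, per-domain PSI flushes in caching mode), then release
 * the queued IOVAs and page freelists. Runs with async_umap_flush_lock
 * held.
 */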
mark gross5e0d2a62008-03-04 15:22:08 -08003099static void flush_unmaps(void)
3100{
mark gross80b20dd2008-04-18 13:53:58 -07003101 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003102
mark gross5e0d2a62008-03-04 15:22:08 -08003103 timer_on = 0;
3104
3105 /* just flush them all */
3106 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003107 struct intel_iommu *iommu = g_iommus[i];
3108 if (!iommu)
3109 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003110
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003111 if (!deferred_flush[i].next)
3112 continue;
3113
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003114		/* In caching mode, global flushes make emulation expensive */
3115 if (!cap_caching_mode(iommu->cap))
3116 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003117 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003118 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003119 unsigned long mask;
3120 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003121 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003122
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003123 /* On real hardware multiple invalidations are expensive */
3124 if (cap_caching_mode(iommu->cap))
3125 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003126 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003127 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003128 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003129 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003130 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3131 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3132 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003133 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003134 if (deferred_flush[i].freelist[j])
3135 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003136 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003137 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003138 }
3139
mark gross5e0d2a62008-03-04 15:22:08 -08003140 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003141}
3142
3143static void flush_unmaps_timeout(unsigned long data)
3144{
mark gross80b20dd2008-04-18 13:53:58 -07003145 unsigned long flags;
3146
3147 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003148 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003149 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003150}
3151
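/*
 * Queue an IOVA range for deferred unmapping on @dom's IOMMU. The queue
 * is drained immediately once it holds HIGH_WATER_MARK entries,
 * otherwise by a timer roughly 10ms later.
 */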
David Woodhouseea8ea462014-03-05 17:09:32 +00003152static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003153{
3154 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003155 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003156 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003157
3158 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003159 if (list_size == HIGH_WATER_MARK)
3160 flush_unmaps();
3161
Weidong Han8c11e792008-12-08 15:29:22 +08003162 iommu = domain_get_iommu(dom);
3163 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003164
mark gross80b20dd2008-04-18 13:53:58 -07003165 next = deferred_flush[iommu_id].next;
3166 deferred_flush[iommu_id].domain[next] = dom;
3167 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003168 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003169 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003170
3171 if (!timer_on) {
3172 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3173 timer_on = 1;
3174 }
3175 list_size++;
3176 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3177}
3178
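/*
 * Tear down the translation for @dev_addr: look up the IOVA, unmap the
 * page tables, and either flush the IOTLB synchronously (strict mode)
 * or batch the flush via add_unmap().
 */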
Jiang Liud41a4ad2014-07-11 14:19:34 +08003179static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003180{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003181 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003182 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003183 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003184 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003185 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003186
David Woodhouse73676832009-07-04 14:08:36 +01003187 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003189
David Woodhouse1525a292014-03-06 16:19:30 +00003190 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003191 BUG_ON(!domain);
3192
Weidong Han8c11e792008-12-08 15:29:22 +08003193 iommu = domain_get_iommu(domain);
3194
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003195 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003196 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3197 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003198 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003199
David Woodhoused794dc92009-06-28 00:27:49 +01003200 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3201 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003202
David Woodhoused794dc92009-06-28 00:27:49 +01003203 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003204 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003205
David Woodhouseea8ea462014-03-05 17:09:32 +00003206 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003207
mark gross5e0d2a62008-03-04 15:22:08 -08003208 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003209 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003210 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003211 /* free iova */
3212 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003213 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003214 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003215 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003216 /*
 3217		 * queue up the release of the unmap to save the roughly 1/6th of
 3218		 * the cpu time otherwise used up by the iotlb flush operation...
3219 */
mark gross5e0d2a62008-03-04 15:22:08 -08003220 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003221}
3222
Jiang Liud41a4ad2014-07-11 14:19:34 +08003223static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3224 size_t size, enum dma_data_direction dir,
3225 struct dma_attrs *attrs)
3226{
3227 intel_unmap(dev, dev_addr);
3228}
3229
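/*
 * DMA-coherent allocation: prefer CMA for sleepable (__GFP_WAIT)
 * requests, fall back to the page allocator, then map the buffer
 * through __intel_map_single() (which simply returns the physical
 * address for devices that bypass the IOMMU).
 */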
David Woodhouse5040a912014-03-09 16:14:00 -07003230static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003231 dma_addr_t *dma_handle, gfp_t flags,
3232 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003233{
Akinobu Mita36746432014-06-04 16:06:51 -07003234 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003235 int order;
3236
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003237 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003238 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003239
David Woodhouse5040a912014-03-09 16:14:00 -07003240 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003241 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003242 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3243 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003244 flags |= GFP_DMA;
3245 else
3246 flags |= GFP_DMA32;
3247 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003248
Akinobu Mita36746432014-06-04 16:06:51 -07003249 if (flags & __GFP_WAIT) {
3250 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003251
Akinobu Mita36746432014-06-04 16:06:51 -07003252 page = dma_alloc_from_contiguous(dev, count, order);
3253 if (page && iommu_no_mapping(dev) &&
3254 page_to_phys(page) + size > dev->coherent_dma_mask) {
3255 dma_release_from_contiguous(dev, page, count);
3256 page = NULL;
3257 }
3258 }
3259
3260 if (!page)
3261 page = alloc_pages(flags, order);
3262 if (!page)
3263 return NULL;
3264 memset(page_address(page), 0, size);
3265
3266 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003267 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003268 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003269 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003270 return page_address(page);
3271 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3272 __free_pages(page, order);
3273
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003274 return NULL;
3275}
3276
David Woodhouse5040a912014-03-09 16:14:00 -07003277static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003278 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003279{
3280 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003281 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003282
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003283 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284 order = get_order(size);
3285
Jiang Liud41a4ad2014-07-11 14:19:34 +08003286 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003287 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3288 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003289}
3290
David Woodhouse5040a912014-03-09 16:14:00 -07003291static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003292 int nelems, enum dma_data_direction dir,
3293 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003294{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003295 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296}
3297
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003299 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300{
3301 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003302 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003303
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003304 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003305 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003306 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003307 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308 }
3309 return nelems;
3310}
3311
David Woodhouse5040a912014-03-09 16:14:00 -07003312static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003313 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003315 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003317 size_t size = 0;
3318 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003319 struct iova *iova = NULL;
3320 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003321 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003322 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003323 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003324
3325 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003326 if (iommu_no_mapping(dev))
3327 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003328
David Woodhouse5040a912014-03-09 16:14:00 -07003329 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003330 if (!domain)
3331 return 0;
3332
Weidong Han8c11e792008-12-08 15:29:22 +08003333 iommu = domain_get_iommu(domain);
3334
David Woodhouseb536d242009-06-28 14:49:31 +01003335 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003336 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003337
David Woodhouse5040a912014-03-09 16:14:00 -07003338 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3339 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003340 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003341 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003342 return 0;
3343 }
3344
3345 /*
3346 * Check if DMAR supports zero-length reads on write only
 3347	 * mappings.
3348 */
 3349	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003350 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003351 prot |= DMA_PTE_READ;
3352 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3353 prot |= DMA_PTE_WRITE;
3354
David Woodhouseb536d242009-06-28 14:49:31 +01003355 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003356
Fenghua Yuf5329592009-08-04 15:09:37 -07003357 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003358 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003359 dma_pte_free_pagetable(domain, start_vpfn,
3360 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003361 __free_iova(&domain->iovad, iova);
3362 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003363 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003364
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003365 /* it's a non-present to present mapping. Only flush if caching mode */
3366 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003367 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003368 else
Weidong Han8c11e792008-12-08 15:29:22 +08003369 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003370
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003371 return nelems;
3372}
3373
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003374static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3375{
3376 return !dma_addr;
3377}
3378
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003379struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003380 .alloc = intel_alloc_coherent,
3381 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003382 .map_sg = intel_map_sg,
3383 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003384 .map_page = intel_map_page,
3385 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003386 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003387};
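/*
 * Drivers reach these operations through the generic DMA API rather
 * than calling them directly. A minimal sketch, assuming a driver
 * already holds a live struct device *dev and a struct page *page:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */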
3388
3389static inline int iommu_domain_cache_init(void)
3390{
3391 int ret = 0;
3392
3393 iommu_domain_cache = kmem_cache_create("iommu_domain",
3394 sizeof(struct dmar_domain),
3395 0,
3396 SLAB_HWCACHE_ALIGN,
3398 NULL);
3399 if (!iommu_domain_cache) {
3400 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3401 ret = -ENOMEM;
3402 }
3403
3404 return ret;
3405}
3406
3407static inline int iommu_devinfo_cache_init(void)
3408{
3409 int ret = 0;
3410
3411 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3412 sizeof(struct device_domain_info),
3413 0,
3414 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003415 NULL);
3416 if (!iommu_devinfo_cache) {
3417 printk(KERN_ERR "Couldn't create devinfo cache\n");
3418 ret = -ENOMEM;
3419 }
3420
3421 return ret;
3422}
3423
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003424static int __init iommu_init_mempool(void)
3425{
3426 int ret;
3427 ret = iommu_iova_cache_init();
3428 if (ret)
3429 return ret;
3430
3431 ret = iommu_domain_cache_init();
3432 if (ret)
3433 goto domain_error;
3434
3435 ret = iommu_devinfo_cache_init();
3436 if (!ret)
3437 return ret;
3438
3439 kmem_cache_destroy(iommu_domain_cache);
3440domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003441 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003442
3443 return -ENOMEM;
3444}
3445
3446static void __init iommu_exit_mempool(void)
3447{
3448 kmem_cache_destroy(iommu_devinfo_cache);
3449 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003450 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003451}
3452
Dan Williams556ab452010-07-23 15:47:56 -07003453static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3454{
3455 struct dmar_drhd_unit *drhd;
3456 u32 vtbar;
3457 int rc;
3458
3459 /* We know that this device on this chipset has its own IOMMU.
3460 * If we find it under a different IOMMU, then the BIOS is lying
3461 * to us. Hope that the IOMMU for this device is actually
3462 * disabled, and it needs no translation...
3463 */
3464 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3465 if (rc) {
3466 /* "can't" happen */
3467 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3468 return;
3469 }
3470 vtbar &= 0xffff0000;
3471
 3472	/* we know that this iommu should be at offset 0xa000 from vtbar */
3473 drhd = dmar_find_matched_drhd_unit(pdev);
3474 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3475 TAINT_FIRMWARE_WORKAROUND,
3476 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3477 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3478}
3479DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3480
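/*
 * Mark DRHD units that translation can skip: units whose device scope
 * is empty, and graphics-only units when dmar_map_gfx is off (their
 * devices get the dummy archdata marker so DMA bypasses the IOMMU).
 */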
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003481static void __init init_no_remapping_devices(void)
3482{
3483 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003484 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003485 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003486
3487 for_each_drhd_unit(drhd) {
3488 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003489 for_each_active_dev_scope(drhd->devices,
3490 drhd->devices_cnt, i, dev)
3491 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003492 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003493 if (i == drhd->devices_cnt)
3494 drhd->ignored = 1;
3495 }
3496 }
3497
Jiang Liu7c919772014-01-06 14:18:18 +08003498 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003499 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003500 continue;
3501
Jiang Liub683b232014-02-19 14:07:32 +08003502 for_each_active_dev_scope(drhd->devices,
3503 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003504 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003505 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003506 if (i < drhd->devices_cnt)
3507 continue;
3508
David Woodhousec0771df2011-10-14 20:59:46 +01003509 /* This IOMMU has *only* gfx devices. Either bypass it or
3510 set the gfx_mapped flag, as appropriate */
3511 if (dmar_map_gfx) {
3512 intel_iommu_gfx_mapped = 1;
3513 } else {
3514 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003515 for_each_active_dev_scope(drhd->devices,
3516 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003517 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003518 }
3519 }
3520}
3521
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003522#ifdef CONFIG_SUSPEND
3523static int init_iommu_hw(void)
3524{
3525 struct dmar_drhd_unit *drhd;
3526 struct intel_iommu *iommu = NULL;
3527
3528 for_each_active_iommu(iommu, drhd)
3529 if (iommu->qi)
3530 dmar_reenable_qi(iommu);
3531
Joseph Cihulab7792602011-05-03 00:08:37 -07003532 for_each_iommu(iommu, drhd) {
3533 if (drhd->ignored) {
3534 /*
3535 * we always have to disable PMRs or DMA may fail on
3536 * this device
3537 */
3538 if (force_on)
3539 iommu_disable_protect_mem_regions(iommu);
3540 continue;
3541 }
3542
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003543 iommu_flush_write_buffer(iommu);
3544
3545 iommu_set_root_entry(iommu);
3546
3547 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003548 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003549 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3550 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003551 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003552 }
3553
3554 return 0;
3555}
3556
3557static void iommu_flush_all(void)
3558{
3559 struct dmar_drhd_unit *drhd;
3560 struct intel_iommu *iommu;
3561
3562 for_each_active_iommu(iommu, drhd) {
3563 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003564 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003565 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003566 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003567 }
3568}
3569
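/*
 * Save the fault-event registers of every active IOMMU and disable
 * translation across suspend; iommu_resume() below restores the saved
 * state.
 */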
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003570static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003571{
3572 struct dmar_drhd_unit *drhd;
3573 struct intel_iommu *iommu = NULL;
3574 unsigned long flag;
3575
3576 for_each_active_iommu(iommu, drhd) {
3577 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3578 GFP_ATOMIC);
3579 if (!iommu->iommu_state)
3580 goto nomem;
3581 }
3582
3583 iommu_flush_all();
3584
3585 for_each_active_iommu(iommu, drhd) {
3586 iommu_disable_translation(iommu);
3587
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003588 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003589
3590 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3591 readl(iommu->reg + DMAR_FECTL_REG);
3592 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3593 readl(iommu->reg + DMAR_FEDATA_REG);
3594 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3595 readl(iommu->reg + DMAR_FEADDR_REG);
3596 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3597 readl(iommu->reg + DMAR_FEUADDR_REG);
3598
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003599 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003600 }
3601 return 0;
3602
3603nomem:
3604 for_each_active_iommu(iommu, drhd)
3605 kfree(iommu->iommu_state);
3606
3607 return -ENOMEM;
3608}
3609
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003610static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003611{
3612 struct dmar_drhd_unit *drhd;
3613 struct intel_iommu *iommu = NULL;
3614 unsigned long flag;
3615
3616 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003617 if (force_on)
3618 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3619 else
3620 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003621 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003622 }
3623
3624 for_each_active_iommu(iommu, drhd) {
3625
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003626 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003627
3628 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3629 iommu->reg + DMAR_FECTL_REG);
3630 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3631 iommu->reg + DMAR_FEDATA_REG);
3632 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3633 iommu->reg + DMAR_FEADDR_REG);
3634 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3635 iommu->reg + DMAR_FEUADDR_REG);
3636
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003637 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003638 }
3639
3640 for_each_active_iommu(iommu, drhd)
3641 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003642}
3643
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003644static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003645 .resume = iommu_resume,
3646 .suspend = iommu_suspend,
3647};
3648
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003649static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003650{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003651 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003652}
3653
3654#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003655static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656#endif /* CONFIG_PM */
3657
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003658
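/*
 * Parse one RMRR entry from the DMAR table into a dmar_rmrr_unit,
 * including its device scope, and add it to dmar_rmrr_units.
 */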
Jiang Liuc2a0b532014-11-09 22:47:56 +08003659int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003660{
3661 struct acpi_dmar_reserved_memory *rmrr;
3662 struct dmar_rmrr_unit *rmrru;
3663
3664 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3665 if (!rmrru)
3666 return -ENOMEM;
3667
3668 rmrru->hdr = header;
3669 rmrr = (struct acpi_dmar_reserved_memory *)header;
3670 rmrru->base_address = rmrr->base_address;
3671 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003672 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3673 ((void *)rmrr) + rmrr->header.length,
3674 &rmrru->devices_cnt);
3675 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3676 kfree(rmrru);
3677 return -ENOMEM;
3678 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003679
Jiang Liu2e455282014-02-19 14:07:36 +08003680 list_add(&rmrru->list, &dmar_rmrr_units);
3681
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003682 return 0;
3683}
3684
Jiang Liu6b197242014-11-09 22:47:58 +08003685static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3686{
3687 struct dmar_atsr_unit *atsru;
3688 struct acpi_dmar_atsr *tmp;
3689
3690 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3691 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3692 if (atsr->segment != tmp->segment)
3693 continue;
3694 if (atsr->header.length != tmp->header.length)
3695 continue;
3696 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3697 return atsru;
3698 }
3699
3700 return NULL;
3701}
3702
3703int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003704{
3705 struct acpi_dmar_atsr *atsr;
3706 struct dmar_atsr_unit *atsru;
3707
Jiang Liu6b197242014-11-09 22:47:58 +08003708 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3709 return 0;
3710
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003711 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003712 atsru = dmar_find_atsr(atsr);
3713 if (atsru)
3714 return 0;
3715
3716 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003717 if (!atsru)
3718 return -ENOMEM;
3719
Jiang Liu6b197242014-11-09 22:47:58 +08003720 /*
 3721	 * If memory is allocated from the slab by the ACPI _DSM method, we need to
3722 * copy the memory content because the memory buffer will be freed
3723 * on return.
3724 */
3725 atsru->hdr = (void *)(atsru + 1);
3726 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003727 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003728 if (!atsru->include_all) {
3729 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3730 (void *)atsr + atsr->header.length,
3731 &atsru->devices_cnt);
3732 if (atsru->devices_cnt && atsru->devices == NULL) {
3733 kfree(atsru);
3734 return -ENOMEM;
3735 }
3736 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003737
Jiang Liu0e242612014-02-19 14:07:34 +08003738 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003739
3740 return 0;
3741}
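/*
 * The allocation above uses a single-block pattern worth calling out:
 * sizeof(*atsru) + hdr->length is allocated in one go, atsru->hdr is
 * pointed at the storage right behind the struct, and the ACPI buffer
 * (which may be freed once a _DSM call returns) is copied into it, so
 * the one kfree() in intel_iommu_free_atsr() releases both. A minimal
 * sketch of the same idiom, with a hypothetical struct foo:
 *
 *	foo = kzalloc(sizeof(*foo) + len, GFP_KERNEL);
 *	foo->data = (void *)(foo + 1);	// trailing storage, no 2nd alloc
 *	memcpy(foo->data, src, len);	// private copy outlives src
 */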
3742
Jiang Liu9bdc5312014-01-06 14:18:27 +08003743static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3744{
3745 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3746 kfree(atsru);
3747}
3748
Jiang Liu6b197242014-11-09 22:47:58 +08003749int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3750{
3751 struct acpi_dmar_atsr *atsr;
3752 struct dmar_atsr_unit *atsru;
3753
3754 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3755 atsru = dmar_find_atsr(atsr);
3756 if (atsru) {
3757 list_del_rcu(&atsru->list);
3758 synchronize_rcu();
3759 intel_iommu_free_atsr(atsru);
3760 }
3761
3762 return 0;
3763}
3764
3765int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3766{
3767 int i;
3768 struct device *dev;
3769 struct acpi_dmar_atsr *atsr;
3770 struct dmar_atsr_unit *atsru;
3771
3772 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3773 atsru = dmar_find_atsr(atsr);
3774 if (!atsru)
3775 return 0;
3776
3777 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3778 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3779 i, dev)
3780 return -EBUSY;
3781
3782 return 0;
3783}
3784
Jiang Liuffebeb42014-11-09 22:48:02 +08003785static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3786{
3787 int sp, ret = 0;
3788 struct intel_iommu *iommu = dmaru->iommu;
3789
3790 if (g_iommus[iommu->seq_id])
3791 return 0;
3792
3793 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3794 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3795 iommu->name);
3796 return -ENXIO;
3797 }
3798 if (!ecap_sc_support(iommu->ecap) &&
3799 domain_update_iommu_snooping(iommu)) {
3800 pr_warn("IOMMU: %s doesn't support snooping.\n",
3801 iommu->name);
3802 return -ENXIO;
3803 }
3804 sp = domain_update_iommu_superpage(iommu) - 1;
3805 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3806 pr_warn("IOMMU: %s doesn't support large page.\n",
3807 iommu->name);
3808 return -ENXIO;
3809 }
3810
3811 /*
3812 * Disable translation if already enabled prior to OS handover.
3813 */
3814 if (iommu->gcmd & DMA_GCMD_TE)
3815 iommu_disable_translation(iommu);
3816
3817 g_iommus[iommu->seq_id] = iommu;
3818 ret = iommu_init_domains(iommu);
3819 if (ret == 0)
3820 ret = iommu_alloc_root_entry(iommu);
3821 if (ret)
3822 goto out;
3823
3824 if (dmaru->ignored) {
3825 /*
3826 * we always have to disable PMRs or DMA may fail on this device
3827 */
3828 if (force_on)
3829 iommu_disable_protect_mem_regions(iommu);
3830 return 0;
3831 }
3832
3833 intel_iommu_init_qi(iommu);
3834 iommu_flush_write_buffer(iommu);
3835 ret = dmar_set_interrupt(iommu);
3836 if (ret)
3837 goto disable_iommu;
3838
3839 iommu_set_root_entry(iommu);
3840 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3841 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3842 iommu_enable_translation(iommu);
3843
3844 if (si_domain) {
3845 ret = iommu_attach_domain(si_domain, iommu);
3846 if (ret < 0 || si_domain->id != ret)
3847 goto disable_iommu;
3848 domain_attach_iommu(si_domain, iommu);
3849 }
3850
3851 iommu_disable_protect_mem_regions(iommu);
3852 return 0;
3853
3854disable_iommu:
3855 disable_dmar_iommu(iommu);
3856out:
3857 free_dmar_iommu(iommu);
3858 return ret;
3859}
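/*
 * Hot-add bring-up order used above, for reference: capability checks
 * (the new unit must not regress pass-through, snooping or superpage
 * support already in use) -> disable any translation left enabled by
 * firmware -> allocate domain ids and the root entry -> queued
 * invalidation and the fault interrupt -> program the root entry and
 * flush the context and IOTLB caches -> only then enable translation
 * and, if present, attach the static identity domain.
 */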
3860
Jiang Liu6b197242014-11-09 22:47:58 +08003861int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3862{
Jiang Liuffebeb42014-11-09 22:48:02 +08003863 int ret = 0;
3864 struct intel_iommu *iommu = dmaru->iommu;
3865
3866 if (!intel_iommu_enabled)
3867 return 0;
3868 if (iommu == NULL)
3869 return -EINVAL;
3870
3871 if (insert) {
3872 ret = intel_iommu_add(dmaru);
3873 } else {
3874 disable_dmar_iommu(iommu);
3875 free_dmar_iommu(iommu);
3876 }
3877
3878 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003879}
3880
Jiang Liu9bdc5312014-01-06 14:18:27 +08003881static void intel_iommu_free_dmars(void)
3882{
3883 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3884 struct dmar_atsr_unit *atsru, *atsr_n;
3885
3886 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3887 list_del(&rmrru->list);
3888 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3889 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003890 }
3891
Jiang Liu9bdc5312014-01-06 14:18:27 +08003892 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3893 list_del(&atsru->list);
3894 intel_iommu_free_atsr(atsru);
3895 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003896}
3897
3898int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3899{
Jiang Liub683b232014-02-19 14:07:32 +08003900 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003901 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003902 struct pci_dev *bridge = NULL;
3903 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003904 struct acpi_dmar_atsr *atsr;
3905 struct dmar_atsr_unit *atsru;
3906
3907 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003908 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003909 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003910 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003911 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003912 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003913 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003914 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003915 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003916 if (!bridge)
3917 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003918
Jiang Liu0e242612014-02-19 14:07:34 +08003919 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003920 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3921 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3922 if (atsr->segment != pci_domain_nr(dev->bus))
3923 continue;
3924
Jiang Liub683b232014-02-19 14:07:32 +08003925 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003926 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003927 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003928
3929 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003930 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003931 }
Jiang Liub683b232014-02-19 14:07:32 +08003932 ret = 0;
3933out:
Jiang Liu0e242612014-02-19 14:07:34 +08003934 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003935
Jiang Liub683b232014-02-19 14:07:32 +08003936 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003937}
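/*
 * Matching logic above, summarized: walk upward from the physical
 * function of the device until a PCIe root port is found (a PCIe-to-PCI
 * bridge on the way disqualifies the device), then compare that root
 * port against the device scope of every ATSR on the same PCI segment,
 * with include_all acting as a per-segment wildcard. A non-zero return
 * means ATS may be used for the device.
 */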
3938
Jiang Liu59ce0512014-02-19 14:07:35 +08003939int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3940{
3941 int ret = 0;
3942 struct dmar_rmrr_unit *rmrru;
3943 struct dmar_atsr_unit *atsru;
3944 struct acpi_dmar_atsr *atsr;
3945 struct acpi_dmar_reserved_memory *rmrr;
3946
3947 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3948 return 0;
3949
3950 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3951 rmrr = container_of(rmrru->hdr,
3952 struct acpi_dmar_reserved_memory, header);
3953 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3954 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3955 ((void *)rmrr) + rmrr->header.length,
3956 rmrr->segment, rmrru->devices,
3957 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003958			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003959 return ret;
3960 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003961 dmar_remove_dev_scope(info, rmrr->segment,
3962 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003963 }
3964 }
3965
3966 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3967 if (atsru->include_all)
3968 continue;
3969
3970 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3971 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3972 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3973 (void *)atsr + atsr->header.length,
3974 atsr->segment, atsru->devices,
3975 atsru->devices_cnt);
3976 if (ret > 0)
3977 break;
 3978			else if (ret < 0)
3979 return ret;
3980 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3981 if (dmar_remove_dev_scope(info, atsr->segment,
3982 atsru->devices, atsru->devices_cnt))
3983 break;
3984 }
3985 }
3986
3987 return 0;
3988}
3989
Fenghua Yu99dcade2009-11-11 07:23:06 -08003990/*
 3991 * Here we only respond to a device being removed from the bus.
 3992 *
 3993 * A newly added device is not attached to its DMAR domain here yet;
 3994 * that happens when the device is first mapped to an iova.
3995 */
3996static int device_notifier(struct notifier_block *nb,
3997 unsigned long action, void *data)
3998{
3999 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004000 struct dmar_domain *domain;
4001
David Woodhouse3d891942014-03-06 15:59:26 +00004002 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004003 return 0;
4004
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004005 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004006 return 0;
4007
David Woodhouse1525a292014-03-06 16:19:30 +00004008 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004009 if (!domain)
4010 return 0;
4011
Jiang Liu3a5670e2014-02-19 14:07:33 +08004012 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004013 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004014 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004015 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004016 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004017
Fenghua Yu99dcade2009-11-11 07:23:06 -08004018 return 0;
4019}
4020
4021static struct notifier_block device_nb = {
4022 .notifier_call = device_notifier,
4023};
4024
Jiang Liu75f05562014-02-19 14:07:37 +08004025static int intel_iommu_memory_notifier(struct notifier_block *nb,
4026 unsigned long val, void *v)
4027{
4028 struct memory_notify *mhp = v;
4029 unsigned long long start, end;
4030 unsigned long start_vpfn, last_vpfn;
4031
4032 switch (val) {
4033 case MEM_GOING_ONLINE:
4034 start = mhp->start_pfn << PAGE_SHIFT;
4035 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4036 if (iommu_domain_identity_map(si_domain, start, end)) {
4037 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4038 start, end);
4039 return NOTIFY_BAD;
4040 }
4041 break;
4042
4043 case MEM_OFFLINE:
4044 case MEM_CANCEL_ONLINE:
4045 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4046 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4047 while (start_vpfn <= last_vpfn) {
4048 struct iova *iova;
4049 struct dmar_drhd_unit *drhd;
4050 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004051 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004052
4053 iova = find_iova(&si_domain->iovad, start_vpfn);
4054 if (iova == NULL) {
 4055				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4056 start_vpfn);
4057 break;
4058 }
4059
4060 iova = split_and_remove_iova(&si_domain->iovad, iova,
4061 start_vpfn, last_vpfn);
4062 if (iova == NULL) {
4063 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4064 start_vpfn, last_vpfn);
4065 return NOTIFY_BAD;
4066 }
4067
David Woodhouseea8ea462014-03-05 17:09:32 +00004068 freelist = domain_unmap(si_domain, iova->pfn_lo,
4069 iova->pfn_hi);
4070
Jiang Liu75f05562014-02-19 14:07:37 +08004071 rcu_read_lock();
4072 for_each_active_iommu(iommu, drhd)
4073 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004074 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004075 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004076 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004077 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004078
4079 start_vpfn = iova->pfn_hi + 1;
4080 free_iova_mem(iova);
4081 }
4082 break;
4083 }
4084
4085 return NOTIFY_OK;
4086}
4087
4088static struct notifier_block intel_iommu_memory_nb = {
4089 .notifier_call = intel_iommu_memory_notifier,
4090 .priority = 0
4091};
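/*
 * Flow of the notifier above, for reference: when a memory block goes
 * online it is added to si_domain's identity map so identity-mapped
 * devices can keep DMAing to it; when it goes offline (or the online is
 * cancelled) the matching IOVA range is split out, unmapped, the IOTLB
 * of every active IOMMU is flushed with a page-selective invalidation
 * before the page-table pages on the freelist are released, and the
 * IOVA node is freed. The notifier is only registered when si_domain
 * exists and hardware pass-through is not in use (see
 * intel_iommu_init()).
 */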
4092
Alex Williamsona5459cf2014-06-12 16:12:31 -06004093
4094static ssize_t intel_iommu_show_version(struct device *dev,
4095 struct device_attribute *attr,
4096 char *buf)
4097{
4098 struct intel_iommu *iommu = dev_get_drvdata(dev);
4099 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4100 return sprintf(buf, "%d:%d\n",
4101 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4102}
4103static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4104
4105static ssize_t intel_iommu_show_address(struct device *dev,
4106 struct device_attribute *attr,
4107 char *buf)
4108{
4109 struct intel_iommu *iommu = dev_get_drvdata(dev);
4110 return sprintf(buf, "%llx\n", iommu->reg_phys);
4111}
4112static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4113
4114static ssize_t intel_iommu_show_cap(struct device *dev,
4115 struct device_attribute *attr,
4116 char *buf)
4117{
4118 struct intel_iommu *iommu = dev_get_drvdata(dev);
4119 return sprintf(buf, "%llx\n", iommu->cap);
4120}
4121static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4122
4123static ssize_t intel_iommu_show_ecap(struct device *dev,
4124 struct device_attribute *attr,
4125 char *buf)
4126{
4127 struct intel_iommu *iommu = dev_get_drvdata(dev);
4128 return sprintf(buf, "%llx\n", iommu->ecap);
4129}
4130static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4131
4132static struct attribute *intel_iommu_attrs[] = {
4133 &dev_attr_version.attr,
4134 &dev_attr_address.attr,
4135 &dev_attr_cap.attr,
4136 &dev_attr_ecap.attr,
4137 NULL,
4138};
4139
4140static struct attribute_group intel_iommu_group = {
4141 .name = "intel-iommu",
4142 .attrs = intel_iommu_attrs,
4143};
4144
4145const struct attribute_group *intel_iommu_groups[] = {
4146 &intel_iommu_group,
4147 NULL,
4148};
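/*
 * With the group above, each remapping unit is expected to show up in
 * sysfs roughly as follows (paths illustrative, rooted at the iommu
 * class device created in intel_iommu_init(), e.g. "dmar0"):
 *
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/address
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/ecap
 */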
4149
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004150int __init intel_iommu_init(void)
4151{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004152 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004153 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004154 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004155
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004156 /* VT-d is required for a TXT/tboot launch, so enforce that */
4157 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004158
Jiang Liu3a5670e2014-02-19 14:07:33 +08004159 if (iommu_init_mempool()) {
4160 if (force_on)
4161 panic("tboot: Failed to initialize iommu memory\n");
4162 return -ENOMEM;
4163 }
4164
4165 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004166 if (dmar_table_init()) {
4167 if (force_on)
4168 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004169 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004170 }
4171
Takao Indoh3a93c842013-04-23 17:35:03 +09004172 /*
4173 * Disable translation if already enabled prior to OS handover.
4174 */
Jiang Liu7c919772014-01-06 14:18:18 +08004175 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004176 if (iommu->gcmd & DMA_GCMD_TE)
4177 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004178
Suresh Siddhac2c72862011-08-23 17:05:19 -07004179 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004180 if (force_on)
4181 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004182 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004183 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004184
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004185 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004186 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004187
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004188 if (list_empty(&dmar_rmrr_units))
4189 printk(KERN_INFO "DMAR: No RMRR found\n");
4190
4191 if (list_empty(&dmar_atsr_units))
4192 printk(KERN_INFO "DMAR: No ATSR found\n");
4193
Joseph Cihula51a63e62011-03-21 11:04:24 -07004194 if (dmar_init_reserved_ranges()) {
4195 if (force_on)
4196 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004197 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004198 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004199
4200 init_no_remapping_devices();
4201
Joseph Cihulab7792602011-05-03 00:08:37 -07004202 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004203 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004204 if (force_on)
4205 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004206 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004207 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004208 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004209 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004210 printk(KERN_INFO
4211 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4212
mark gross5e0d2a62008-03-04 15:22:08 -08004213 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004214#ifdef CONFIG_SWIOTLB
4215 swiotlb = 0;
4216#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004217 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004218
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004219 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004220
Alex Williamsona5459cf2014-06-12 16:12:31 -06004221 for_each_active_iommu(iommu, drhd)
4222 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4223 intel_iommu_groups,
4224 iommu->name);
4225
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004226 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004227 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004228 if (si_domain && !hw_pass_through)
4229 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004230
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004231 intel_iommu_enabled = 1;
4232
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004233 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004234
4235out_free_reserved_range:
4236 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004237out_free_dmar:
4238 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004239 up_write(&dmar_global_lock);
4240 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004241 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004242}
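/*
 * Note on the tail of the success path above: once init_dmars()
 * succeeds, dma_ops is switched to intel_dma_ops and (when built in)
 * swiotlb bounce buffering is turned off, since devices behind a
 * working IOMMU no longer need it. The error labels unwind in reverse
 * order of setup, dropping dmar_global_lock before the mempool is torn
 * down.
 */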
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004243
Alex Williamson579305f2014-07-03 09:51:43 -06004244static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4245{
4246 struct intel_iommu *iommu = opaque;
4247
4248 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4249 return 0;
4250}
4251
4252/*
4253 * NB - intel-iommu lacks any sort of reference counting for the users of
4254 * dependent devices. If multiple endpoints have intersecting dependent
4255 * devices, unbinding the driver from any one of them will possibly leave
4256 * the others unable to operate.
4257 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004258static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004259 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004260{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004261 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004262 return;
4263
Alex Williamson579305f2014-07-03 09:51:43 -06004264 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004265}
4266
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004267static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004268 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004269{
Yijing Wangbca2b912013-10-31 17:26:04 +08004270 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004271 struct intel_iommu *iommu;
4272 unsigned long flags;
4273 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004274 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004275
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004276 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004277 if (!iommu)
4278 return;
4279
4280 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004281 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004282 if (info->iommu == iommu && info->bus == bus &&
4283 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004284 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004285 spin_unlock_irqrestore(&device_domain_lock, flags);
4286
Yu Zhao93a23a72009-05-18 13:51:37 +08004287 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004288 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004289 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004290 free_devinfo_mem(info);
4291
4292 spin_lock_irqsave(&device_domain_lock, flags);
4293
4294 if (found)
4295 break;
4296 else
4297 continue;
4298 }
4299
 4300		/* If there are no other devices under the same iommu
 4301		 * owned by this domain, clear this iommu in iommu_bmp,
 4302		 * and update the iommu count and coherency.
 4303		 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004304 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004305 found = 1;
4306 }
4307
Roland Dreier3e7abe22011-07-20 06:22:21 -07004308 spin_unlock_irqrestore(&device_domain_lock, flags);
4309
Weidong Hanc7151a82008-12-08 22:51:37 +08004310 if (found == 0) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004311 domain_detach_iommu(domain, iommu);
4312 if (!domain_type_is_vm_or_si(domain))
4313 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004314 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004315}
4316
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004317static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004318{
4319 int adjust_width;
4320
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004321 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4322 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004323 domain_reserve_special_ranges(domain);
4324
4325 /* calculate AGAW */
4326 domain->gaw = guest_width;
4327 adjust_width = guestwidth_to_adjustwidth(guest_width);
4328 domain->agaw = width_to_agaw(adjust_width);
4329
Weidong Han5e98c4b2008-12-08 23:03:27 +08004330 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004331 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004332 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004333 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004334
4335 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004336 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004337 if (!domain->pgd)
4338 return -ENOMEM;
4339 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4340 return 0;
4341}
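/*
 * Worked example of the width arithmetic above, assuming the helpers
 * behave as defined earlier in this file: for the default 48-bit guest
 * width, guestwidth_to_adjustwidth(48) returns 48 (already a multiple
 * of the 9 bits resolved per level above the 12-bit page offset), and
 * width_to_agaw(48) yields agaw 2, i.e. a 4-level page table. A 30-bit
 * guest width rounds the same way to agaw 0, a 2-level table.
 */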
4342
Joerg Roedel5d450802008-12-03 14:52:32 +01004343static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004344{
Joerg Roedel5d450802008-12-03 14:52:32 +01004345 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004346
Jiang Liuab8dfe22014-07-11 14:19:27 +08004347 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004348 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004349 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004350 "intel_iommu_domain_init: dmar_domain == NULL\n");
4351 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004352 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004353 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004354 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004355 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004356 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004357 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004358 }
Allen Kay8140a952011-10-14 12:32:17 -07004359 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004360 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004361
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004362 domain->geometry.aperture_start = 0;
4363 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4364 domain->geometry.force_aperture = true;
4365
Joerg Roedel5d450802008-12-03 14:52:32 +01004366 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004367}
Kay, Allen M38717942008-09-09 18:37:29 +03004368
Joerg Roedel5d450802008-12-03 14:52:32 +01004369static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004370{
Joerg Roedel5d450802008-12-03 14:52:32 +01004371 struct dmar_domain *dmar_domain = domain->priv;
4372
4373 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004374 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004375}
Kay, Allen M38717942008-09-09 18:37:29 +03004376
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004377static int intel_iommu_attach_device(struct iommu_domain *domain,
4378 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004379{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004380 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004381 struct intel_iommu *iommu;
4382 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004383 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004384
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004385 if (device_is_rmrr_locked(dev)) {
4386 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4387 return -EPERM;
4388 }
4389
David Woodhouse7207d8f2014-03-09 16:31:06 -07004390 /* normally dev is not mapped */
4391 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004392 struct dmar_domain *old_domain;
4393
David Woodhouse1525a292014-03-06 16:19:30 +00004394 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004395 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004396 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004397 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004398 else
4399 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004400
4401 if (!domain_type_is_vm_or_si(old_domain) &&
4402 list_empty(&old_domain->devices))
4403 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004404 }
4405 }
4406
David Woodhouse156baca2014-03-09 14:00:57 -07004407 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004408 if (!iommu)
4409 return -ENODEV;
4410
4411 /* check if this iommu agaw is sufficient for max mapped address */
4412 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004413 if (addr_width > cap_mgaw(iommu->cap))
4414 addr_width = cap_mgaw(iommu->cap);
4415
4416 if (dmar_domain->max_addr > (1LL << addr_width)) {
4417 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004418 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004419 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004420 return -EFAULT;
4421 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004422 dmar_domain->gaw = addr_width;
4423
4424 /*
4425 * Knock out extra levels of page tables if necessary
4426 */
4427 while (iommu->agaw < dmar_domain->agaw) {
4428 struct dma_pte *pte;
4429
4430 pte = dmar_domain->pgd;
4431 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004432 dmar_domain->pgd = (struct dma_pte *)
4433 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004434 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004435 }
4436 dmar_domain->agaw--;
4437 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004438
David Woodhouse5913c9b2014-03-09 16:27:31 -07004439 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004440}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004441
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004442static void intel_iommu_detach_device(struct iommu_domain *domain,
4443 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004444{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004445 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004446
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004447 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004448}
Kay, Allen M38717942008-09-09 18:37:29 +03004449
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004450static int intel_iommu_map(struct iommu_domain *domain,
4451 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004452 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004453{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004454 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004455 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004456 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004457 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004458
Joerg Roedeldde57a22008-12-03 15:04:09 +01004459 if (iommu_prot & IOMMU_READ)
4460 prot |= DMA_PTE_READ;
4461 if (iommu_prot & IOMMU_WRITE)
4462 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08004463 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4464 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004465
David Woodhouse163cc522009-06-28 00:51:17 +01004466 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004467 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004468 u64 end;
4469
4470 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004471 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004472 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004473 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004474 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004475 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004476 return -EFAULT;
4477 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004478 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004479 }
David Woodhousead051222009-06-28 14:22:28 +01004480 /* Round up size to next multiple of PAGE_SIZE, if it and
4481 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004482 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004483 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4484 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004485 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004486}
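/*
 * Worked example of the rounding above, assuming aligned_nrpages()
 * folds the low bits of hpa into the size before rounding to whole
 * pages, as its use here implies: with 4KiB pages, hpa = 0x1fff and
 * size = 0x2 touch two distinct pages, so aligned_nrpages(0x1fff, 0x2)
 * evaluates to 2 and both pages are mapped; a naive
 * size >> VTD_PAGE_SHIFT would map none.
 */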
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004487
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004488static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004489 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004490{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004491 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004492 struct page *freelist = NULL;
4493 struct intel_iommu *iommu;
4494 unsigned long start_pfn, last_pfn;
4495 unsigned int npages;
4496 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004497
David Woodhouse5cf0a762014-03-19 16:07:49 +00004498 /* Cope with horrid API which requires us to unmap more than the
4499 size argument if it happens to be a large-page mapping. */
4500 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4501 BUG();
4502
4503 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4504 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4505
David Woodhouseea8ea462014-03-05 17:09:32 +00004506 start_pfn = iova >> VTD_PAGE_SHIFT;
4507 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4508
4509 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4510
4511 npages = last_pfn - start_pfn + 1;
4512
4513 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4514 iommu = g_iommus[iommu_id];
4515
4516 /*
4517 * find bit position of dmar_domain
4518 */
4519 ndomains = cap_ndoms(iommu->cap);
4520 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4521 if (iommu->domains[num] == dmar_domain)
4522 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4523 npages, !freelist, 0);
4524 }
4525
4526 }
4527
4528 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004529
David Woodhouse163cc522009-06-28 00:51:17 +01004530 if (dmar_domain->max_addr == iova + size)
4531 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004532
David Woodhouse5cf0a762014-03-19 16:07:49 +00004533 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004534}
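/*
 * Consequence of the large-page handling above, as an example: if the
 * iova was mapped with a 2MiB superpage (level 2), an unmap request for
 * 4KiB at that iova is widened to VTD_PAGE_SIZE << 9 = 2MiB, the whole
 * entry is torn down, and 2MiB is returned, which callers of
 * iommu_unmap() must be prepared to see.
 */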
Kay, Allen M38717942008-09-09 18:37:29 +03004535
Joerg Roedeld14d6572008-12-03 15:06:57 +01004536static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05304537 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004538{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004539 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004540 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004541 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004542 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004543
David Woodhouse5cf0a762014-03-19 16:07:49 +00004544 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004545 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004546 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004547
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004548 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004549}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004550
Joerg Roedel5d587b82014-09-05 10:50:45 +02004551static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004552{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004553 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004554 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004555 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004556 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004557
Joerg Roedel5d587b82014-09-05 10:50:45 +02004558 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004559}
4560
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004561static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004562{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004563 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004564 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004565 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004566
Alex Williamsona5459cf2014-06-12 16:12:31 -06004567 iommu = device_to_iommu(dev, &bus, &devfn);
4568 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004569 return -ENODEV;
4570
Alex Williamsona5459cf2014-06-12 16:12:31 -06004571 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004572
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004573 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004574
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004575 if (IS_ERR(group))
4576 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004577
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004578 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004579 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004580}
4581
4582static void intel_iommu_remove_device(struct device *dev)
4583{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004584 struct intel_iommu *iommu;
4585 u8 bus, devfn;
4586
4587 iommu = device_to_iommu(dev, &bus, &devfn);
4588 if (!iommu)
4589 return;
4590
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004591 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004592
4593 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004594}
4595
Thierry Redingb22f6432014-06-27 09:03:12 +02004596static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004597 .capable = intel_iommu_capable,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004598 .domain_init = intel_iommu_domain_init,
4599 .domain_destroy = intel_iommu_domain_destroy,
4600 .attach_dev = intel_iommu_attach_device,
4601 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004602 .map = intel_iommu_map,
4603 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004604 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004605 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004606 .add_device = intel_iommu_add_device,
4607 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004608 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004609};
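/*
 * These ops are wired to the generic IOMMU layer via bus_set_iommu() in
 * intel_iommu_init(). An illustrative consumer-side sketch (a
 * VFIO-style user; pdev is a hypothetical struct pci_dev *) reaches
 * them through the generic API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, &pdev->dev))  // -> intel_iommu_attach_device()
 *		goto out_free;
 *	// -> intel_iommu_map() / intel_iommu_unmap()
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, &pdev->dev);
 * out_free:
 *	iommu_domain_free(dom);
 */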
David Woodhouse9af88142009-02-13 23:18:03 +00004610
Daniel Vetter94526182013-01-20 23:50:13 +01004611static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4612{
4613 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4614 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4615 dmar_map_gfx = 0;
4616}
4617
4618DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4619DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4620DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4621DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4622DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4623DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
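/*
 * Pattern note for the quirk lists in this file: each
 * DECLARE_PCI_FIXUP_HEADER(vendor, device, fn) entry arranges for fn to
 * run while the matching device's config header is read during bus
 * enumeration, well before any driver binds. That is early enough for
 * the quirks here to clear dmar_map_gfx, set rwbf_quirk or force
 * intel_iommu_strict before init_dmars() consumes those flags.
 */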
4625
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004626static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004627{
4628 /*
4629 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004630 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004631 */
4632 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4633 rwbf_quirk = 1;
4634}
4635
4636DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004637DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4638DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4639DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4640DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4641DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4642DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004643
Adam Jacksoneecfd572010-08-25 21:17:34 +01004644#define GGC 0x52
4645#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4646#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4647#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4648#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4649#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4650#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4651#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4652#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4653
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004654static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004655{
4656 unsigned short ggc;
4657
Adam Jacksoneecfd572010-08-25 21:17:34 +01004658 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004659 return;
4660
Adam Jacksoneecfd572010-08-25 21:17:34 +01004661 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004662 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4663 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004664 } else if (dmar_map_gfx) {
4665 /* we have to ensure the gfx device is idle before we flush */
4666 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4667 intel_iommu_strict = 1;
4668 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004669}
4670DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4673DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4674
David Woodhousee0fc7e02009-09-30 09:12:17 -07004675/* On Tylersburg chipsets, some BIOSes have been known to enable the
4676 ISOCH DMAR unit for the Azalia sound device, but not give it any
4677 TLB entries, which causes it to deadlock. Check for that. We do
4678 this in a function called from init_dmars(), instead of in a PCI
4679 quirk, because we don't want to print the obnoxious "BIOS broken"
4680 message if VT-d is actually disabled.
4681*/
4682static void __init check_tylersburg_isoch(void)
4683{
4684 struct pci_dev *pdev;
4685 uint32_t vtisochctrl;
4686
4687 /* If there's no Azalia in the system anyway, forget it. */
4688 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4689 if (!pdev)
4690 return;
4691 pci_dev_put(pdev);
4692
4693 /* System Management Registers. Might be hidden, in which case
4694 we can't do the sanity check. But that's OK, because the
4695 known-broken BIOSes _don't_ actually hide it, so far. */
4696 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4697 if (!pdev)
4698 return;
4699
4700 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4701 pci_dev_put(pdev);
4702 return;
4703 }
4704
4705 pci_dev_put(pdev);
4706
4707 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4708 if (vtisochctrl & 1)
4709 return;
4710
4711 /* Drop all bits other than the number of TLB entries */
4712 vtisochctrl &= 0x1c;
4713
4714 /* If we have the recommended number of TLB entries (16), fine. */
4715 if (vtisochctrl == 0x10)
4716 return;
4717
4718 /* Zero TLB entries? You get to ride the short bus to school. */
4719 if (!vtisochctrl) {
4720 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4721 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4722 dmi_get_system_info(DMI_BIOS_VENDOR),
4723 dmi_get_system_info(DMI_BIOS_VERSION),
4724 dmi_get_system_info(DMI_PRODUCT_VERSION));
4725 iommu_identity_mapping |= IDENTMAP_AZALIA;
4726 return;
4727 }
4728
4729 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4730 vtisochctrl);
4731}
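/*
 * Register decode above, as a worked example (config dword 0x188 of the
 * 0x342e system-management device): bit 0 set means Azalia DMA goes to
 * the non-isoch DMAR unit, so e.g. 0x11 returns immediately; with bit 0
 * clear, bits 2-4 (mask 0x1c) encode the TLB entries granted to the
 * isoch unit, so 0x10 is the recommended 16 entries, 0x00 is the
 * broken-BIOS case that triggers the WARN and forces Azalia into the
 * identity map, and anything else just logs the closing warning.
 */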