/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
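
/*
 * Worked example: with the default 48-bit guest address width and 4KiB
 * VT-d pages, __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1. On 64-bit builds
 * that fits in an unsigned long unchanged; on 32-bit builds
 * DOMAIN_MAX_PFN() clamps it to ~0UL so PFN arithmetic stays safe.
 */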

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
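
/*
 * Example: the common agaw value of 2 selects a 4-level page table
 * (agaw_to_level(2) == 4) covering 30 + 2 * 9 == 48 bits of address.
 * pfn_level_offset() then extracts the 9-bit index for each level from
 * a DMA pfn: bits 27..35 at level 4 down to bits 0..8 at level 1.
 */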

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
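
/*
 * On x86 with 4KiB pages PAGE_SHIFT == VTD_PAGE_SHIFT, so these
 * conversions are identity operations; they only shift when the CPU
 * page size is larger than the 4KiB VT-d page size (e.g. a hypothetical
 * 64KiB PAGE_SIZE would make each mm pfn cover 16 DMA pfns).
 */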

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d can't be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}
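
/*
 * The root table has one root_entry per PCI bus number (256 entries,
 * one VT-d page in total), and each present entry points to a context
 * table indexed by devfn. See device_to_context_entry() below, which
 * allocates the per-bus context table on first use.
 */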

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
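
/*
 * Rough sketch of how these helpers are used when a device is attached
 * (the present bit is set last, so the hardware never sees a partially
 * initialized entry):
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */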

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
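
/*
 * The 32-bit path above abuses __cmpxchg64(ptr, 0, 0): if the PTE is
 * zero it is "replaced" by zero, otherwise nothing is written; either
 * way the returned old value is a tear-free 64-bit snapshot of the PTE,
 * which a plain load could not guarantee on 32-bit kernels.
 */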

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
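
/*
 * first_pte_in_page() is true when the pte pointer sits at the start of
 * a page-table page; the walking loops below use it to detect when they
 * have stepped past the last entry of the current table.
 */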

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

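/*
 * Deferred-unmap bookkeeping: rather than flushing the IOTLB on every
 * unmap, completed IOVA ranges (and their page-table freelists) are
 * parked in deferred_flush[] and flushed in batches, either from
 * flush_unmaps_timeout() or once HIGH_WATER_MARK entries accumulate.
 * Booting with intel_iommu=strict disables this batching.
 */
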
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
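
/*
 * Options combine as a comma-separated list on the kernel command line,
 * e.g. "intel_iommu=on,strict" enables the IOMMU and makes every unmap
 * flush the IOTLB synchronously instead of batching.
 */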

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

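/*
 * Example: DEFAULT_DOMAIN_ADDRESS_WIDTH (48) corresponds to agaw 2, so
 * an iommu whose SAGAW capability has bit 2 set gets a 4-level table;
 * if only bit 1 is set, the loop above falls back to agaw 1 (39-bit,
 * 3-level).
 */
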
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage)
		return 0;

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

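/*
 * cap_super_page_val() is a 4-bit field: bit 0 means 2MiB pages, bit 1
 * 1GiB, and so on. ANDing it across iommus keeps only the sizes every
 * unit supports, so a final mask of 0x1 yields fls(0x1) == 1 (2MiB
 * superpages) and 0x3 yields 2 (up to 1GiB).
 */
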
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

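/*
 * Matching order in device_to_iommu(): an exact hit in a DRHD device
 * scope wins; for PCI devices, a bridge in a scope whose subordinate
 * bus range contains the device also claims it; otherwise an
 * INCLUDE_ALL DRHD in the same segment is the catch-all.
 */
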
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

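/*
 * Callers pass *target_level == 0 to look up the existing leaf entry
 * for a pfn (the level found is written back), or a specific level to
 * get, and if needed build, the entry at that level. Intermediate
 * tables are installed with cmpxchg64(), so callers racing on the same
 * slot simply adopt whichever table landed first.
 */
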
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; should be followed by a TLB flush */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

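/*
 * Typical unmap sequence built from the two functions above:
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB (and device IOTLBs) for that range ...
 *	dma_free_pagelist(freelist);
 *
 * which keeps every page-table page alive until the hardware can no
 * longer be walking it.
 */
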
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

1175/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001176static void __iommu_flush_context(struct intel_iommu *iommu,
1177 u16 did, u16 source_id, u8 function_mask,
1178 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001179{
1180 u64 val = 0;
1181 unsigned long flag;
1182
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001183 switch (type) {
1184 case DMA_CCMD_GLOBAL_INVL:
1185 val = DMA_CCMD_GLOBAL_INVL;
1186 break;
1187 case DMA_CCMD_DOMAIN_INVL:
1188 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1189 break;
1190 case DMA_CCMD_DEVICE_INVL:
1191 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1192 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1193 break;
1194 default:
1195 BUG();
1196 }
1197 val |= DMA_CCMD_ICC;
1198
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001199 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001200 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1201
1202 /* Make sure hardware complete it */
1203 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1204 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1205
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001206 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001207}
1208
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra-safe. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

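/*
 * For PSI flushes, 'addr' must be aligned to the flushed size and
 * 'size_order' is log2 of the number of 4KiB pages being invalidated
 * (the AM field of the IVA register), so e.g. size_order == 4 flushes
 * a naturally aligned block of 16 pages.
 */
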
static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
			u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

1304static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1305{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001306 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001307 return;
1308
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001309 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001310}
1311
1312static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1313{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001314 if (!info->dev || !dev_is_pci(info->dev) ||
1315 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001316 return;
1317
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001318 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001319}
1320
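/*
 * Issue an ATS (device-IOTLB) invalidation to every device in the domain
 * that has ATS enabled.  The 16-bit source-id is bus:devfn, and qdep is
 * the device's invalidation queue depth, so the queued-invalidation code
 * can pipeline the flush to the device.
 */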
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                struct pci_dev *pdev;

                if (!info->dev || !dev_is_pci(info->dev))
                        continue;

                pdev = to_pci_dev(info->dev);
                if (!pci_ats_enabled(pdev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(pdev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

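/*
 * Flush the IOTLB for one domain, using a page-selective invalidation
 * (PSI) when the hardware supports it.  The mask is log2 of the number
 * of pages to invalidate: e.g. pages = 5 rounds up to 8, giving mask = 3,
 * i.e. one naturally aligned 8-page invalidation.
 */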
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages, int ih, int map)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        if (ih)
                ih = 1 << 6;
        /*
         * Fall back to a domain-selective flush if there is no PSI support
         * or the size is too big.
         * PSI requires the page size to be 2 ^ x, and the base address to
         * be naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, changes of pages from non-present to present
         * require a flush.  However, the device IOTLB doesn't need to be
         * flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
                 iommu->seq_id, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chips
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("IOMMU%d: allocating domain id array failed\n",
                       iommu->seq_id);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                pr_err("IOMMU%d: allocating domain array failed\n",
                       iommu->seq_id);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
        }

        /*
         * If caching mode is set, then invalid translations are tagged
         * with domain id 0.  Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;

        if ((iommu->domains) && (iommu->domain_ids)) {
                for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        /*
                         * Domain id 0 is reserved for invalid translation
                         * if hardware supports caching mode.
                         */
                        if (cap_caching_mode(iommu->cap) && i == 0)
                                continue;

                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);
                        if (domain_detach_iommu(domain, iommu) == 0 &&
                            !domain_type_is_vm(domain))
                                domain_exit(domain);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
        if ((iommu->domains) && (iommu->domain_ids)) {
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
                iommu->domain_ids = NULL;
        }

        g_iommus[iommu->seq_id] = NULL;

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
        /* Domain ids for virtual machines; they are not set in the context */
        static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
        spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
        if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                domain->id = atomic_inc_return(&vm_domid);

        return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
                                 struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                iommu->domains[num] = domain;
        } else {
                num = -ENOSPC;
        }

        return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
                pr_err("IOMMU: no free domain ids\n");

        return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
                                  struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        for_each_set_bit(num, iommu->domain_ids, ndomains)
                if (iommu->domains[num] == domain)
                        return num;

        return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;

        spin_lock_irqsave(&iommu->lock, flags);
        if (domain_type_is_vm_or_si(domain)) {
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                clear_bit(num, iommu->domain_ids);
                                iommu->domains[num] = NULL;
                                break;
                        }
                }
        } else {
                clear_bit(domain->id, iommu->domain_ids);
                iommu->domains[domain->id] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

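/*
 * A domain tracks the IOMMUs it is attached to with one bit per IOMMU in
 * iommu_bmp plus a count of set bits.  The first attach also picks the
 * domain's NUMA node; a detach that returns 0 tells the caller no IOMMU
 * references the domain any more.
 */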
static void domain_attach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
                domain->iommu_count++;
                if (domain->iommu_count == 1)
                        domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        unsigned long flags;
        int count = INT_MAX;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
                count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);

        return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
                printk(KERN_ERR "Reserve IOAPIC range failed\n");
                return -ENODEV;
        }

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
                                printk(KERN_ERR "Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
        }
        return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

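/*
 * Round a guest address width up to the next value the page tables can
 * express.  Each table level translates 9 bits on top of the 12-bit page
 * offset, so valid widths are 12 + 9 * n: e.g. gaw = 40 gives
 * r = (40 - 12) % 9 = 1 and rounds up to 48, while gaw = 48 is already
 * aligned and is returned unchanged.
 */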
static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        if (intel_iommu_superpage)
                domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
        else
                domain->iommu_superpage = 0;

        domain->nid = iommu->node;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct page *freelist = NULL;
        int i;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        /* Flush any lazy unmaps that may reference this domain */
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);

        /* remove associated devices */
        domain_remove_dev_info(domain);

        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* clear attached or cached domains */
        rcu_read_lock();
        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
                iommu_detach_domain(domain, g_iommus[i]);
        rcu_read_unlock();

        dma_free_pagelist(freelist);

        free_domain_mem(domain);
}

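/*
 * Program the context-table entry for bus:devfn so its DMA is translated
 * through this domain's page tables, or passed through untranslated for
 * CONTEXT_TT_PASS_THROUGH.  Caching-mode hardware may cache the old
 * non-present entry under domain #0, so it needs explicit context and
 * IOTLB invalidations; otherwise flushing the write buffer is enough.
 */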
static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        id = domain->id;
        pgd = domain->pgd;

        if (domain_type_is_vm_or_si(domain)) {
                if (domain_type_is_vm(domain)) {
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                pr_err("IOMMU: no free domain ids\n");
                                return -EFAULT;
                        }
                }

                /* Skip top levels of page tables for
                 * an iommu which has less agaw than the default.
                 * Unnecessary for PT mode.
                 */
                if (translation != CONTEXT_TT_PASS_THROUGH) {
                        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                                pgd = phys_to_virt(dma_pte_addr(pgd));
                                if (!dma_pte_present(pgd)) {
                                        spin_unlock_irqrestore(&iommu->lock, flags);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware.  And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping.  If hardware doesn't cache
         * non-present entries, we only need to flush the write-buffer.  If
         * it _does_ cache non-present entries, then it does so in the
         * special domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        domain_attach_iommu(domain, iommu);

        return 0;
}

struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
        int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
                                     u16 alias, void *opaque)
{
        struct domain_context_mapping_data *data = opaque;

        return domain_context_mapping_one(data->domain, data->iommu,
                                          PCI_BUS_NUM(alias), alias & 0xff,
                                          data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                       int translation)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;
        struct domain_context_mapping_data data;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return domain_context_mapping_one(domain, iommu, bus, devfn,
                                                  translation);

        data.domain = domain;
        data.iommu = iommu;
        data.translation = translation;

        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
                                    u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return device_context_mapped(iommu, bus, devfn);

        return !pci_for_each_dma_alias(to_pci_dev(dev),
                                       domain_context_mapped_cb, iommu);
}

/* Returns the number of VTD pages, aligned to the MM page size */
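/*
 * Example, assuming 4KiB pages: host_addr = 0x1234 and size = 0x2000
 * span the byte range [0x1234, 0x3233], which touches three pages, so
 * this returns 3 even though size itself is only two pages.
 */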
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
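/*
 * Level 1 is a 4KiB page; each further level multiplies the page size by
 * 512 (one 9-bit stride).  A level-2 (2MiB) superpage is only usable when
 * both the IOVA pfn and the physical pfn are 512-page aligned and at
 * least 512 pages remain to be mapped, and so on for 1GiB.
 */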
static inline int hardware_largepage_caps(struct dmar_domain *domain,
                                          unsigned long iov_pfn,
                                          unsigned long phy_pfn,
                                          unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* To use a large page, the virtual *and* physical addresses
           must be aligned to 2MiB/1GiB/etc.  Lower bits set in either
           of them will mean we have to use smaller pages.  So just
           merge them and check both at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}

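/*
 * Map nr_pages of IOVA space starting at iov_pfn, taking the physical
 * pages either from the scatterlist (sg != NULL) or from the contiguous
 * range starting at phys_pfn.  Superpage PTEs are used whenever alignment
 * and remaining length allow, and the CPU cache is flushed once per
 * page-table page rather than once per PTE.
 */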
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;

        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (!sg) {
                sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages > 0) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

                if (!pte) {
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
                        /* It is a large page */
                        if (largepage_lvl > 1) {
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
                                /*
                                 * Ensure that old small page tables are
                                 * removed to make room for the superpage,
                                 * if they exist.
                                 */
                                dma_pte_free_pagetable(domain, iov_pfn,
                                                       iov_pfn + lvl_pages - 1);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
                }
                /* We don't need a lock here; nobody else
                 * touches the iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                               iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }

                lvl_pages = lvl_to_nr_pages(largepage_lvl);

                BUG_ON(nr_pages < lvl_pages);
                BUG_ON(sg_res < lvl_pages);

                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
                pteval += lvl_pages * VTD_PAGE_SIZE;
                sg_res -= lvl_pages;

                /* If the next PTE would be the first in a new page, then we
                   need to flush the cache on the entries we've just written.
                   And then we'll need to recalculate 'pte', so clear it and
                   let it get set again in the if (!pte) block above.

                   If we're done (!nr_pages) we need to flush the cache too.

                   Also if we've been setting superpages, we may need to
                   recalculate 'pte' and switch back to smaller pages for the
                   end of the mapping, if the trailing size is not enough to
                   use another superpage (i.e. sg_res < lvl_pages). */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
                    (largepage_lvl > 1 && sg_res < lvl_pages)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }

                if (!sg_res && nr_pages)
                        sg = sg_next(sg);
        }
        return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                    struct scatterlist *sg, unsigned long nr_pages,
                                    int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                     unsigned long phys_pfn, unsigned long nr_pages,
                                     int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
        assert_spin_locked(&device_domain_lock);
        list_del(&info->link);
        list_del(&info->global);
        if (info->dev)
                info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link) {
                unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu_detach_dev(info->iommu, info->bus, info->devfn);

                if (domain_type_is_vm(domain)) {
                        iommu_detach_dependent_devices(info->iommu, info->dev);
                        domain_detach_iommu(domain, info->iommu);
                }

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: the info is stored in struct device->archdata.iommu
 */
static struct dmar_domain *find_domain(struct device *dev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = dev->archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
        struct device_domain_info *info;

        list_for_each_entry(info, &device_domain_list, global)
                if (info->iommu->segment == segment && info->bus == bus &&
                    info->devfn == devfn)
                        return info;

        return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                                                int bus, int devfn,
                                                struct device *dev,
                                                struct dmar_domain *domain)
{
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();
        if (!info)
                return NULL;

        info->bus = bus;
        info->devfn = devfn;
        info->dev = dev;
        info->domain = domain;
        info->iommu = iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
        else {
                struct device_domain_info *info2;

                info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
                if (info2)
                        found = info2->domain;
        }
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
                /* Caller must free the original domain */
                return found;
        }

        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
        *(u16 *)opaque = alias;
        return 0;
}

/* domain is initialized */
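/*
 * Find or create the domain for a device.  A PCI device shares its domain
 * with its last-reported DMA alias (e.g. a PCIe-to-PCI bridge), so the
 * alias is looked up first and a newly allocated domain is registered
 * under both the alias and the device itself.
 */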
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
        struct device_domain_info *info;
        u16 dma_alias;
        unsigned long flags;
        u8 bus, devfn;

        domain = find_domain(dev);
        if (domain)
                return domain;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return NULL;

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

                spin_lock_irqsave(&device_domain_lock, flags);
                info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
                                                      PCI_BUS_NUM(dma_alias),
                                                      dma_alias & 0xff);
                if (info) {
                        iommu = info->iommu;
                        domain = info->domain;
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);

                /* DMA alias already has a domain, use it */
                if (info)
                        goto found_domain;
        }

        /* Allocate and initialize a new domain for the device */
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
        domain->id = iommu_attach_domain(domain, iommu);
        if (domain->id < 0) {
                free_domain_mem(domain);
                return NULL;
        }
        domain_attach_iommu(domain, iommu);
        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                return NULL;
        }

        /* register PCI DMA alias device */
        if (dev_is_pci(dev)) {
                tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                           dma_alias & 0xff, NULL, domain);

                if (!tmp || tmp != domain) {
                        domain_exit(domain);
                        domain = tmp;
                }

                if (!domain)
                        return NULL;
        }

found_domain:
        tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

        if (!tmp || tmp != domain) {
                domain_exit(domain);
                domain = tmp;
        }

        return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4

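/*
 * Set up a 1:1 mapping for [start, end]: reserve the IOVA range so the
 * allocator never hands it out, clear any stale PTEs, then map the range
 * with iova == phys.
 */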
static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
{
        unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
        unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
                return -ENOMEM;
        }

        pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
                 start, end, domain->id);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
         */
        dma_pte_clear_range(domain, first_vpfn, last_vpfn);

        return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
                                  last_vpfn - first_vpfn + 1,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct device *dev,
                                      unsigned long long start,
                                      unsigned long long end)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;

        /* For _hardware_ passthrough, don't bother.  But for software
           passthrough, we do it anyway -- it may indicate a memory
           range which is reserved in E820, so which didn't get set
           up to start with in si_domain */
        if (domain == si_domain && hw_pass_through) {
                printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
                       dev_name(dev), start, end);
                return 0;
        }

        printk(KERN_INFO
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               dev_name(dev), start, end);

        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     agaw_to_width(domain->agaw),
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;

        /* context entry init */
        ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
        if (ret)
                goto error;

        return 0;

error:
        domain_exit(domain);
        return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
                                         struct device *dev)
{
        if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return 0;
        return iommu_prepare_identity_map(dev, rmrr->base_address,
                                          rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
        struct pci_dev *pdev;
        int ret;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (!pdev)
                return;

        printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

        if (ret)
                printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
                       "floppy might not work\n");

        pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
        return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

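/*
 * Build the static identity (si) domain used for pass-through devices.
 * The same domain id must be obtained from every IOMMU, and unless the
 * hardware implements pass-through itself (hw != 0), every usable memory
 * range of every online node is identity-mapped into the domain.
 */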
Matt Kraai071e1372009-08-23 22:30:22 -07002426static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002427{
2428 struct dmar_drhd_unit *drhd;
2429 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002430 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002431 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002432
Jiang Liuab8dfe22014-07-11 14:19:27 +08002433 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002434 if (!si_domain)
2435 return -EFAULT;
2436
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002437 for_each_active_iommu(iommu, drhd) {
2438 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002439 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002440 domain_exit(si_domain);
2441 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002442 } else if (first) {
2443 si_domain->id = ret;
2444 first = false;
2445 } else if (si_domain->id != ret) {
2446 domain_exit(si_domain);
2447 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002448 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002449 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002450 }
2451
2452 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2453 domain_exit(si_domain);
2454 return -EFAULT;
2455 }
2456
Jiang Liu9544c002014-01-06 14:18:13 +08002457 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2458 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002459
David Woodhouse19943b02009-08-04 16:19:20 +01002460 if (hw)
2461 return 0;
2462
David Woodhousec7ab48d2009-06-26 19:10:36 +01002463 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002464 unsigned long start_pfn, end_pfn;
2465 int i;
2466
2467 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2468 ret = iommu_domain_identity_map(si_domain,
2469 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2470 if (ret)
2471 return ret;
2472 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002473 }
2474
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002475 return 0;
2476}
2477
David Woodhouse9b226622014-03-09 14:03:28 -07002478static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002479{
2480 struct device_domain_info *info;
2481
2482 if (likely(!iommu_identity_mapping))
2483 return 0;
2484
David Woodhouse9b226622014-03-09 14:03:28 -07002485 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002486 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2487 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002488
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002489 return 0;
2490}
2491
2492static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002493 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002494{
David Woodhouse0ac72662014-03-09 13:19:22 -07002495 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002496 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002497 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002498 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002499
David Woodhouse5913c9b2014-03-09 16:27:31 -07002500 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002501 if (!iommu)
2502 return -ENODEV;
2503
David Woodhouse5913c9b2014-03-09 16:27:31 -07002504 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002505 if (ndomain != domain)
2506 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002507
David Woodhouse5913c9b2014-03-09 16:27:31 -07002508 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002509 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002510 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002511 return ret;
2512 }
2513
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002514 return 0;
2515}
2516
David Woodhouse0b9d9752014-03-09 15:48:15 -07002517static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002518{
2519 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002520 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002521 int i;
2522
Jiang Liu0e242612014-02-19 14:07:34 +08002523 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002524 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002525 /*
2526 * Return TRUE if this RMRR contains the device that
2527 * is passed in.
2528 */
2529 for_each_active_dev_scope(rmrr->devices,
2530 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002531 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002532 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002533 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002534 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002535 }
Jiang Liu0e242612014-02-19 14:07:34 +08002536 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002537 return false;
2538}
2539
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002540/*
2541 * There are a couple of cases where we need to restrict the functionality of
2542 * devices associated with RMRRs. The first is when evaluating a device for
2543 * identity mapping because problems exist when devices are moved in and out
2544 * of domains and their respective RMRR information is lost. This means that
2545 * a device with associated RMRRs will never be in a "passthrough" domain.
2546 * The second is use of the device through the IOMMU API. This interface
2547 * expects to have full control of the IOVA space for the device. We cannot
2548 * satisfy both the requirement that RMRR access is maintained and have an
2549 * unencumbered IOVA space. We also have no ability to quiesce the device's
2550 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2551 * We therefore prevent devices associated with an RMRR from participating in
2552 * the IOMMU API, which eliminates them from device assignment.
2553 *
2554 * In both cases we assume that PCI USB devices with RMRRs have them largely
2555 * for historical reasons and that the RMRR space is not actively used post
2556 * boot. This exclusion may change if vendors begin to abuse it.
2557 */
2558static bool device_is_rmrr_locked(struct device *dev)
2559{
2560 if (!device_has_rmrr(dev))
2561 return false;
2562
2563 if (dev_is_pci(dev)) {
2564 struct pci_dev *pdev = to_pci_dev(dev);
2565
2566 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2567 return false;
2568 }
2569
2570 return true;
2571}
2572
David Woodhouse3bdb2592014-03-09 16:03:08 -07002573static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002574{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002575
David Woodhouse3bdb2592014-03-09 16:03:08 -07002576 if (dev_is_pci(dev)) {
2577 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002578
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002579 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002580 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002581
David Woodhouse3bdb2592014-03-09 16:03:08 -07002582 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2583 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002584
David Woodhouse3bdb2592014-03-09 16:03:08 -07002585 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2586 return 1;
2587
2588 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2589 return 0;
2590
2591 /*
2592 * We want to start off with all devices in the 1:1 domain, and
2593 * take them out later if we find they can't access all of memory.
2594 *
2595 * However, we can't do this for PCI devices behind bridges,
2596 * because all PCI devices behind the same bridge will end up
2597 * with the same source-id on their transactions.
2598 *
2599 * Practically speaking, we can't change things around for these
2600 * devices at run-time, because we can't be sure there'll be no
2601 * DMA transactions in flight for any of their siblings.
2602 *
2603 * So PCI devices (unless they're on the root bus) as well as
2604 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2605 * the 1:1 domain, just in _case_ one of their siblings turns out
2606 * not to be able to map all of memory.
2607 */
2608 if (!pci_is_pcie(pdev)) {
2609 if (!pci_is_root_bus(pdev->bus))
2610 return 0;
2611 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2612 return 0;
2613 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2614 return 0;
2615 } else {
2616 if (device_has_rmrr(dev))
2617 return 0;
2618 }
David Woodhouse6941af22009-07-04 18:24:27 +01002619
David Woodhouse3dfc8132009-07-04 19:11:08 +01002620 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002621 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002622 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002623 * take them out of the 1:1 domain later.
2624 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002625 if (!startup) {
2626 /*
2627 * If the device's dma_mask is less than the system's memory
2628 * size then this is not a candidate for identity mapping.
2629 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002630 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002631
David Woodhouse3bdb2592014-03-09 16:03:08 -07002632 if (dev->coherent_dma_mask &&
2633 dev->coherent_dma_mask < dma_mask)
2634 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002635
David Woodhouse3bdb2592014-03-09 16:03:08 -07002636 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002637 }
David Woodhouse6941af22009-07-04 18:24:27 +01002638
2639 return 1;
2640}
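/*
 * In short, iommu_should_identity_map() above resolves to:
 *
 *	RMRR-locked device			-> never identity map
 *	Azalia audio, if IDENTMAP_AZALIA	-> identity map
 *	graphics device, if IDENTMAP_GFX	-> identity map
 *	anything else without IDENTMAP_ALL	-> translate
 *	conventional PCI off the root bus,
 *	PCI/PCIe-to-PCI bridges			-> translate
 *	at run time (startup == 0)		-> identity map only if the
 *						   DMA mask covers all of
 *						   the required memory
 */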
2641
David Woodhousecf04eee2014-03-21 16:49:04 +00002642static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2643{
2644 int ret;
2645
2646 if (!iommu_should_identity_map(dev, 1))
2647 return 0;
2648
2649 ret = domain_add_dev_info(si_domain, dev,
2650 hw ? CONTEXT_TT_PASS_THROUGH :
2651 CONTEXT_TT_MULTI_LEVEL);
2652 if (!ret)
2653 pr_info("IOMMU: %s identity mapping for device %s\n",
2654 hw ? "hardware" : "software", dev_name(dev));
2655 else if (ret == -ENODEV)
2656 /* device not associated with an iommu */
2657 ret = 0;
2658
2659 return ret;
2660}
2661
2662
Matt Kraai071e1372009-08-23 22:30:22 -07002663static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002664{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002665 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002666 struct dmar_drhd_unit *drhd;
2667 struct intel_iommu *iommu;
2668 struct device *dev;
2669 int i;
2670 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002671
David Woodhouse19943b02009-08-04 16:19:20 +01002672 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002673 if (ret)
2674 return -EFAULT;
2675
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002676 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002677 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2678 if (ret)
2679 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002680 }
2681
David Woodhousecf04eee2014-03-21 16:49:04 +00002682 for_each_active_iommu(iommu, drhd)
2683 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2684 struct acpi_device_physical_node *pn;
2685 struct acpi_device *adev;
2686
2687 if (dev->bus != &acpi_bus_type)
2688 continue;
2689
2690 adev = to_acpi_device(dev);
2691 mutex_lock(&adev->physical_node_lock);
2692 list_for_each_entry(pn, &adev->physical_node_list, node) {
2693 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2694 if (ret)
2695 break;
2696 }
2697 mutex_unlock(&adev->physical_node_lock);
2698 if (ret)
2699 return ret;
2700 }
2701
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002702 return 0;
2703}
2704
Jiang Liuffebeb42014-11-09 22:48:02 +08002705static void intel_iommu_init_qi(struct intel_iommu *iommu)
2706{
2707 /*
2708 * Start from a sane iommu hardware state.
2709 * If queued invalidation was already initialized by us
2710 * (for example, while enabling interrupt-remapping) then
2711 * things are already rolling from a sane state.
2712 */
2713 if (!iommu->qi) {
2714 /*
2715 * Clear any previous faults.
2716 */
2717 dmar_fault(-1, iommu);
2718 /*
2719 * Disable queued invalidation if supported and already enabled
2720 * before OS handover.
2721 */
2722 dmar_disable_qi(iommu);
2723 }
2724
2725 if (dmar_enable_qi(iommu)) {
2726 /*
2727 * Queued Invalidate not enabled, use Register Based Invalidate
2728 */
2729 iommu->flush.flush_context = __iommu_flush_context;
2730 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2731 pr_info("IOMMU: %s using Register based invalidation\n",
2732 iommu->name);
2733 } else {
2734 iommu->flush.flush_context = qi_flush_context;
2735 iommu->flush.flush_iotlb = qi_flush_iotlb;
2736 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2737 }
2738}
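/*
 * After intel_iommu_init_qi() the two flush callbacks are used the
 * same way whether or not queued invalidation is available, e.g. as
 * init_dmars() does below:
 *
 *	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 */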
2739
Joseph Cihulab7792602011-05-03 00:08:37 -07002740static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002741{
2742 struct dmar_drhd_unit *drhd;
2743 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002744 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002745 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002746 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002747
2748 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002749 * for each drhd
2750 * allocate root
2751 * initialize and program root entry to not present
2752 * endfor
2753 */
2754 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002755 /*
2756 * lock not needed as this is only incremented in the
2757 * single-threaded kernel __init code path; all other
2758 * access is read only
2759 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002760 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002761 g_num_of_iommus++;
2762 continue;
2763 }
2764 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002765 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002766 }
2767
Jiang Liuffebeb42014-11-09 22:48:02 +08002768 /* Preallocate enough resources for IOMMU hot-addition */
2769 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2770 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2771
Weidong Hand9630fe2008-12-08 11:06:32 +08002772 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2773 GFP_KERNEL);
2774 if (!g_iommus) {
2775 printk(KERN_ERR "Allocating global iommu array failed\n");
2776 ret = -ENOMEM;
2777 goto error;
2778 }
2779
mark gross80b20dd2008-04-18 13:53:58 -07002780 deferred_flush = kzalloc(g_num_of_iommus *
2781 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2782 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002783 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002784 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002785 }
2786
Jiang Liu7c919772014-01-06 14:18:18 +08002787 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002788 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002789
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002790 ret = iommu_init_domains(iommu);
2791 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002792 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002793
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002794 /*
2795 * TBD:
2796 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002797 * among all IOMMUs; need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002798 */
2799 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002800 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002801 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002802 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002803 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002804 }
2805
Jiang Liuffebeb42014-11-09 22:48:02 +08002806 for_each_active_iommu(iommu, drhd)
2807 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002808
David Woodhouse19943b02009-08-04 16:19:20 +01002809 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002810 iommu_identity_mapping |= IDENTMAP_ALL;
2811
Suresh Siddhad3f13812011-08-23 17:05:25 -07002812#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002813 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002814#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002815
2816 check_tylersburg_isoch();
2817
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002818 /*
2819 * If pass through is not set or not enabled, set up context entries
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002820 * for identity mappings for rmrr, gfx and isa, and fall back to the
2821 * static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002822 */
David Woodhouse19943b02009-08-04 16:19:20 +01002823 if (iommu_identity_mapping) {
2824 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2825 if (ret) {
2826 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002827 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002828 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002829 }
David Woodhouse19943b02009-08-04 16:19:20 +01002830 /*
2831 * For each rmrr
2832 * for each dev attached to rmrr
2833 * do
2834 * locate drhd for dev, alloc domain for dev
2835 * allocate free domain
2836 * allocate page table entries for rmrr
2837 * if context not allocated for bus
2838 * allocate and init context
2839 * set present in root table for this bus
2840 * init context with domain, translation etc
2841 * endfor
2842 * endfor
2843 */
2844 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2845 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002846 /* some BIOSes list non-existent devices in the DMAR table. */
2847 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002848 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002849 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002850 if (ret)
2851 printk(KERN_ERR
2852 "IOMMU: mapping reserved region failed\n");
2853 }
2854 }
2855
2856 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002857
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002858 /*
2859 * for each drhd
2860 * enable fault log
2861 * global invalidate context cache
2862 * global invalidate iotlb
2863 * enable translation
2864 */
Jiang Liu7c919772014-01-06 14:18:18 +08002865 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002866 if (drhd->ignored) {
2867 /*
2868 * we always have to disable PMRs or DMA may fail on
2869 * this device
2870 */
2871 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002872 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002873 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002874 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002875
2876 iommu_flush_write_buffer(iommu);
2877
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002878 ret = dmar_set_interrupt(iommu);
2879 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002880 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002881
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002882 iommu_set_root_entry(iommu);
2883
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002884 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002885 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002886 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002887 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002888 }
2889
2890 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002891
2892free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002893 for_each_active_iommu(iommu, drhd) {
2894 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002895 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002896 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002897 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002898free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002899 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002900error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002901 return ret;
2902}
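/*
 * init_dmars() in short: count the DMAR units and allocate per-IOMMU
 * state, build root/context tables, set up (queued) invalidation,
 * create the static identity domain plus RMRR and ISA mappings, and
 * finally, per unit: enable fault reporting, program the root entry,
 * issue global context/IOTLB flushes and turn translation on.
 */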
2903
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002904/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002905static struct iova *intel_alloc_iova(struct device *dev,
2906 struct dmar_domain *domain,
2907 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002908{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002909 struct iova *iova = NULL;
2910
David Woodhouse875764d2009-06-28 21:20:51 +01002911 /* Restrict dma_mask to the width that the iommu can handle */
2912 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2913
2914 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002915 /*
2916 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002917 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002918 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002919 */
David Woodhouse875764d2009-06-28 21:20:51 +01002920 iova = alloc_iova(&domain->iovad, nrpages,
2921 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2922 if (iova)
2923 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002924 }
David Woodhouse875764d2009-06-28 21:20:51 +01002925 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2926 if (unlikely(!iova)) {
2927 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002928 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002929 return NULL;
2930 }
2931
2932 return iova;
2933}
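/*
 * Typical use, as in __intel_map_single() below; note that callers
 * convert VT-d page counts to mm page counts first:
 *
 *	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
 *				dma_mask);
 */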
2934
David Woodhoused4b709f2014-03-09 16:07:40 -07002935static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002936{
2937 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002938 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002939
David Woodhoused4b709f2014-03-09 16:07:40 -07002940 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002941 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002942 printk(KERN_ERR "Allocating domain for %s failed\n",
2943 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002944 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002945 }
2946
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002947 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002948 if (unlikely(!domain_context_mapped(dev))) {
2949 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002950 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002951 printk(KERN_ERR "Domain context map for %s failed\n",
2952 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002953 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002954 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002955 }
2956
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002957 return domain;
2958}
2959
David Woodhoused4b709f2014-03-09 16:07:40 -07002960static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002961{
2962 struct device_domain_info *info;
2963
2964 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002965 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002966 if (likely(info))
2967 return info->domain;
2968
2969 return __get_valid_domain_for_dev(dev);
2970}
2971
David Woodhouse3d891942014-03-06 15:59:26 +00002972static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002973{
David Woodhouse3d891942014-03-06 15:59:26 +00002974 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002975}
2976
David Woodhouseecb509e2014-03-09 16:29:55 -07002977 /* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002978static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002979{
2980 int found;
2981
David Woodhouse3d891942014-03-06 15:59:26 +00002982 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002983 return 1;
2984
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002985 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002986 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002987
David Woodhouse9b226622014-03-09 14:03:28 -07002988 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002989 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002990 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002991 return 1;
2992 else {
2993 /*
2994 * The 32 bit DMA device is removed from si_domain
2995 * and falls back to non-identity mapping.
2996 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002997 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002998 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002999 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003000 return 0;
3001 }
3002 } else {
3003 /*
3004 * A 64 bit DMA device detached from a VM is put
3005 * back into si_domain for identity mapping.
3006 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003007 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003008 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003009 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003010 hw_pass_through ?
3011 CONTEXT_TT_PASS_THROUGH :
3012 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003013 if (!ret) {
3014 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003015 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003016 return 1;
3017 }
3018 }
3019 }
3020
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003021 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003022}
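/*
 * So iommu_no_mapping() returns 1 (bypass translation) for dummy
 * devices and devices that stay in si_domain, and as a side effect
 * migrates a device into or out of si_domain whenever its identity
 * mapping eligibility changes at run time.
 */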
3023
David Woodhouse5040a912014-03-09 16:14:00 -07003024static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003025 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003026{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003027 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003028 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003029 struct iova *iova;
3030 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003031 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003032 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003033 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003034
3035 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003036
David Woodhouse5040a912014-03-09 16:14:00 -07003037 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003038 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003039
David Woodhouse5040a912014-03-09 16:14:00 -07003040 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003041 if (!domain)
3042 return 0;
3043
Weidong Han8c11e792008-12-08 15:29:22 +08003044 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003045 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003046
David Woodhouse5040a912014-03-09 16:14:00 -07003047 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003048 if (!iova)
3049 goto error;
3050
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003051 /*
3052 * Check if DMAR supports zero-length reads on write-only
3053 * mappings.
3054 */
3055 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003056 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003057 prot |= DMA_PTE_READ;
3058 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3059 prot |= DMA_PTE_WRITE;
3060 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003061 * paddr .. paddr + size might span a partial page, so we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003062 * whole page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003063 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003064 * is not a big problem
3065 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003066 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003067 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003068 if (ret)
3069 goto error;
3070
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003071 /* it's a non-present to present mapping. Only flush if caching mode */
3072 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003073 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003074 else
Weidong Han8c11e792008-12-08 15:29:22 +08003075 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003076
David Woodhouse03d6a242009-06-28 15:33:46 +01003077 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3078 start_paddr += paddr & ~PAGE_MASK;
3079 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003080
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003081error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003082 if (iova)
3083 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003084 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003085 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003086 return 0;
3087}
3088
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003089static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3090 unsigned long offset, size_t size,
3091 enum dma_data_direction dir,
3092 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003093{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003094 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003095 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003096}
3097
mark gross5e0d2a62008-03-04 15:22:08 -08003098static void flush_unmaps(void)
3099{
mark gross80b20dd2008-04-18 13:53:58 -07003100 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003101
mark gross5e0d2a62008-03-04 15:22:08 -08003102 timer_on = 0;
3103
3104 /* just flush them all */
3105 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003106 struct intel_iommu *iommu = g_iommus[i];
3107 if (!iommu)
3108 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003109
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003110 if (!deferred_flush[i].next)
3111 continue;
3112
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003113 /* In caching mode, global flushes make emulation expensive */
3114 if (!cap_caching_mode(iommu->cap))
3115 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003116 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003117 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003118 unsigned long mask;
3119 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003120 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003121
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003122 /* On real hardware multiple invalidations are expensive */
3123 if (cap_caching_mode(iommu->cap))
3124 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003125 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003126 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003127 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003128 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003129 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3130 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3131 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003132 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003133 if (deferred_flush[i].freelist[j])
3134 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003135 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003136 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003137 }
3138
mark gross5e0d2a62008-03-04 15:22:08 -08003139 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003140}
3141
3142static void flush_unmaps_timeout(unsigned long data)
3143{
mark gross80b20dd2008-04-18 13:53:58 -07003144 unsigned long flags;
3145
3146 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003147 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003148 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003149}
3150
David Woodhouseea8ea462014-03-05 17:09:32 +00003151static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003152{
3153 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003154 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003155 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003156
3157 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003158 if (list_size == HIGH_WATER_MARK)
3159 flush_unmaps();
3160
Weidong Han8c11e792008-12-08 15:29:22 +08003161 iommu = domain_get_iommu(dom);
3162 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003163
mark gross80b20dd2008-04-18 13:53:58 -07003164 next = deferred_flush[iommu_id].next;
3165 deferred_flush[iommu_id].domain[next] = dom;
3166 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003167 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003168 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003169
3170 if (!timer_on) {
3171 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3172 timer_on = 1;
3173 }
3174 list_size++;
3175 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3176}
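/*
 * Deferred unmap batching, in short: entries queue up per IOMMU
 * until either HIGH_WATER_MARK of them are pending or the 10ms
 * unmap_timer fires; flush_unmaps() then issues one global flush
 * (or per-entry PSI flushes in caching mode) instead of one flush
 * per dma unmap call.
 */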
3177
Jiang Liud41a4ad2014-07-11 14:19:34 +08003178static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003179{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003180 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003181 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003182 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003183 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003184 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185
David Woodhouse73676832009-07-04 14:08:36 +01003186 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003187 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003188
David Woodhouse1525a292014-03-06 16:19:30 +00003189 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003190 BUG_ON(!domain);
3191
Weidong Han8c11e792008-12-08 15:29:22 +08003192 iommu = domain_get_iommu(domain);
3193
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003194 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003195 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3196 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003197 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003198
David Woodhoused794dc92009-06-28 00:27:49 +01003199 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3200 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003201
David Woodhoused794dc92009-06-28 00:27:49 +01003202 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003203 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003204
David Woodhouseea8ea462014-03-05 17:09:32 +00003205 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003206
mark gross5e0d2a62008-03-04 15:22:08 -08003207 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003208 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003209 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003210 /* free iova */
3211 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003212 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003213 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003214 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003215 /*
3216 * queue up the release of the unmap to save the 1/6th of
3217 * cpu time used up by the iotlb flush operation...
3218 */
mark gross5e0d2a62008-03-04 15:22:08 -08003219 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003220}
3221
Jiang Liud41a4ad2014-07-11 14:19:34 +08003222static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3223 size_t size, enum dma_data_direction dir,
3224 struct dma_attrs *attrs)
3225{
3226 intel_unmap(dev, dev_addr);
3227}
3228
David Woodhouse5040a912014-03-09 16:14:00 -07003229static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003230 dma_addr_t *dma_handle, gfp_t flags,
3231 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003232{
Akinobu Mita36746432014-06-04 16:06:51 -07003233 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003234 int order;
3235
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003236 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003237 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003238
David Woodhouse5040a912014-03-09 16:14:00 -07003239 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003240 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003241 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3242 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003243 flags |= GFP_DMA;
3244 else
3245 flags |= GFP_DMA32;
3246 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247
Akinobu Mita36746432014-06-04 16:06:51 -07003248 if (flags & __GFP_WAIT) {
3249 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250
Akinobu Mita36746432014-06-04 16:06:51 -07003251 page = dma_alloc_from_contiguous(dev, count, order);
3252 if (page && iommu_no_mapping(dev) &&
3253 page_to_phys(page) + size > dev->coherent_dma_mask) {
3254 dma_release_from_contiguous(dev, page, count);
3255 page = NULL;
3256 }
3257 }
3258
3259 if (!page)
3260 page = alloc_pages(flags, order);
3261 if (!page)
3262 return NULL;
3263 memset(page_address(page), 0, size);
3264
3265 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003266 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003267 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003269 return page_address(page);
3270 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3271 __free_pages(page, order);
3272
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003273 return NULL;
3274}
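/*
 * Drivers reach intel_alloc_coherent() through the generic DMA API.
 * A minimal usage sketch (hypothetical driver code, not part of
 * this file):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &bus_addr,
 *				      GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, size, cpu_addr, bus_addr);
 */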
3275
David Woodhouse5040a912014-03-09 16:14:00 -07003276static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003277 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003278{
3279 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003280 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003281
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003282 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003283 order = get_order(size);
3284
Jiang Liud41a4ad2014-07-11 14:19:34 +08003285 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003286 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3287 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003288}
3289
David Woodhouse5040a912014-03-09 16:14:00 -07003290static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003291 int nelems, enum dma_data_direction dir,
3292 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003293{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003294 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003295}
3296
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003297static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003298 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003299{
3300 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003301 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003303 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003304 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003305 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003306 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003307 }
3308 return nelems;
3309}
3310
David Woodhouse5040a912014-03-09 16:14:00 -07003311static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003312 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003315 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003316 size_t size = 0;
3317 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003318 struct iova *iova = NULL;
3319 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003320 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003321 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003322 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323
3324 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003325 if (iommu_no_mapping(dev))
3326 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327
David Woodhouse5040a912014-03-09 16:14:00 -07003328 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003329 if (!domain)
3330 return 0;
3331
Weidong Han8c11e792008-12-08 15:29:22 +08003332 iommu = domain_get_iommu(domain);
3333
David Woodhouseb536d242009-06-28 14:49:31 +01003334 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003335 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003336
David Woodhouse5040a912014-03-09 16:14:00 -07003337 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3338 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003339 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003340 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003341 return 0;
3342 }
3343
3344 /*
3345 * Check if DMAR supports zero-length reads on write-only
3346 * mappings.
3347 */
3348 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003349 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003350 prot |= DMA_PTE_READ;
3351 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3352 prot |= DMA_PTE_WRITE;
3353
David Woodhouseb536d242009-06-28 14:49:31 +01003354 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003355
Fenghua Yuf5329592009-08-04 15:09:37 -07003356 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003357 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003358 dma_pte_free_pagetable(domain, start_vpfn,
3359 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003360 __free_iova(&domain->iovad, iova);
3361 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003362 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003363
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003364 /* it's a non-present to present mapping. Only flush if caching mode */
3365 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003366 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003367 else
Weidong Han8c11e792008-12-08 15:29:22 +08003368 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003369
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003370 return nelems;
3371}
3372
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003373static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3374{
3375 return !dma_addr;
3376}
3377
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003378struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003379 .alloc = intel_alloc_coherent,
3380 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003381 .map_sg = intel_map_sg,
3382 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003383 .map_page = intel_map_page,
3384 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003385 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003386};
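/*
 * intel_dma_ops is installed as the global dma_map_ops when the
 * IOMMU is enabled, so e.g. a driver's dma_map_page() ends up in
 * intel_map_page() above and dma_unmap_page() in intel_unmap_page().
 */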
3387
3388static inline int iommu_domain_cache_init(void)
3389{
3390 int ret = 0;
3391
3392 iommu_domain_cache = kmem_cache_create("iommu_domain",
3393 sizeof(struct dmar_domain),
3394 0,
3395 SLAB_HWCACHE_ALIGN,
3397 NULL);
3398 if (!iommu_domain_cache) {
3399 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3400 ret = -ENOMEM;
3401 }
3402
3403 return ret;
3404}
3405
3406static inline int iommu_devinfo_cache_init(void)
3407{
3408 int ret = 0;
3409
3410 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3411 sizeof(struct device_domain_info),
3412 0,
3413 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003414 NULL);
3415 if (!iommu_devinfo_cache) {
3416 printk(KERN_ERR "Couldn't create devinfo cache\n");
3417 ret = -ENOMEM;
3418 }
3419
3420 return ret;
3421}
3422
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003423static int __init iommu_init_mempool(void)
3424{
3425 int ret;
3426 ret = iommu_iova_cache_init();
3427 if (ret)
3428 return ret;
3429
3430 ret = iommu_domain_cache_init();
3431 if (ret)
3432 goto domain_error;
3433
3434 ret = iommu_devinfo_cache_init();
3435 if (!ret)
3436 return ret;
3437
3438 kmem_cache_destroy(iommu_domain_cache);
3439domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003440 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003441
3442 return -ENOMEM;
3443}
3444
3445static void __init iommu_exit_mempool(void)
3446{
3447 kmem_cache_destroy(iommu_devinfo_cache);
3448 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003449 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003450}
3451
Dan Williams556ab452010-07-23 15:47:56 -07003452static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3453{
3454 struct dmar_drhd_unit *drhd;
3455 u32 vtbar;
3456 int rc;
3457
3458 /* We know that this device on this chipset has its own IOMMU.
3459 * If we find it under a different IOMMU, then the BIOS is lying
3460 * to us. Hope that the IOMMU for this device is actually
3461 * disabled, and it needs no translation...
3462 */
3463 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3464 if (rc) {
3465 /* "can't" happen */
3466 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3467 return;
3468 }
3469 vtbar &= 0xffff0000;
3470
3471 /* we know that this iommu should be at offset 0xa000 from the vtbar */
3472 drhd = dmar_find_matched_drhd_unit(pdev);
3473 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3474 TAINT_FIRMWARE_WORKAROUND,
3475 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3476 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3477}
3478DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
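/*
 * DECLARE_PCI_FIXUP_ENABLE() runs the quirk above whenever a
 * matching QuickData device is enabled; tagging it with
 * DUMMY_DEVICE_DOMAIN_INFO makes iommu_no_mapping() treat it as
 * untranslated from then on.
 */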
3479
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003480static void __init init_no_remapping_devices(void)
3481{
3482 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003483 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003484 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003485
3486 for_each_drhd_unit(drhd) {
3487 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003488 for_each_active_dev_scope(drhd->devices,
3489 drhd->devices_cnt, i, dev)
3490 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003491 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003492 if (i == drhd->devices_cnt)
3493 drhd->ignored = 1;
3494 }
3495 }
3496
Jiang Liu7c919772014-01-06 14:18:18 +08003497 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003498 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003499 continue;
3500
Jiang Liub683b232014-02-19 14:07:32 +08003501 for_each_active_dev_scope(drhd->devices,
3502 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003503 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003504 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003505 if (i < drhd->devices_cnt)
3506 continue;
3507
David Woodhousec0771df2011-10-14 20:59:46 +01003508 /* This IOMMU has *only* gfx devices. Either bypass it or
3509 set the gfx_mapped flag, as appropriate */
3510 if (dmar_map_gfx) {
3511 intel_iommu_gfx_mapped = 1;
3512 } else {
3513 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003514 for_each_active_dev_scope(drhd->devices,
3515 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003516 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003517 }
3518 }
3519}
3520
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003521#ifdef CONFIG_SUSPEND
3522static int init_iommu_hw(void)
3523{
3524 struct dmar_drhd_unit *drhd;
3525 struct intel_iommu *iommu = NULL;
3526
3527 for_each_active_iommu(iommu, drhd)
3528 if (iommu->qi)
3529 dmar_reenable_qi(iommu);
3530
Joseph Cihulab7792602011-05-03 00:08:37 -07003531 for_each_iommu(iommu, drhd) {
3532 if (drhd->ignored) {
3533 /*
3534 * we always have to disable PMRs or DMA may fail on
3535 * this device
3536 */
3537 if (force_on)
3538 iommu_disable_protect_mem_regions(iommu);
3539 continue;
3540 }
3541
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003542 iommu_flush_write_buffer(iommu);
3543
3544 iommu_set_root_entry(iommu);
3545
3546 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003547 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003548 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3549 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003550 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003551 }
3552
3553 return 0;
3554}
3555
3556static void iommu_flush_all(void)
3557{
3558 struct dmar_drhd_unit *drhd;
3559 struct intel_iommu *iommu;
3560
3561 for_each_active_iommu(iommu, drhd) {
3562 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003563 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003564 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003565 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003566 }
3567}
3568
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003569static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003570{
3571 struct dmar_drhd_unit *drhd;
3572 struct intel_iommu *iommu = NULL;
3573 unsigned long flag;
3574
3575 for_each_active_iommu(iommu, drhd) {
3576 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3577 GFP_ATOMIC);
3578 if (!iommu->iommu_state)
3579 goto nomem;
3580 }
3581
3582 iommu_flush_all();
3583
3584 for_each_active_iommu(iommu, drhd) {
3585 iommu_disable_translation(iommu);
3586
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003587 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003588
3589 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3590 readl(iommu->reg + DMAR_FECTL_REG);
3591 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3592 readl(iommu->reg + DMAR_FEDATA_REG);
3593 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3594 readl(iommu->reg + DMAR_FEADDR_REG);
3595 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3596 readl(iommu->reg + DMAR_FEUADDR_REG);
3597
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003598 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003599 }
3600 return 0;
3601
3602nomem:
3603 for_each_active_iommu(iommu, drhd)
3604 kfree(iommu->iommu_state);
3605
3606 return -ENOMEM;
3607}
3608
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003609static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003610{
3611 struct dmar_drhd_unit *drhd;
3612 struct intel_iommu *iommu = NULL;
3613 unsigned long flag;
3614
3615 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003616 if (force_on)
3617 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3618 else
3619 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003620 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003621 }
3622
3623 for_each_active_iommu(iommu, drhd) {
3624
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003625 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003626
3627 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3628 iommu->reg + DMAR_FECTL_REG);
3629 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3630 iommu->reg + DMAR_FEDATA_REG);
3631 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3632 iommu->reg + DMAR_FEADDR_REG);
3633 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3634 iommu->reg + DMAR_FEUADDR_REG);
3635
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003636 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003637 }
3638
3639 for_each_active_iommu(iommu, drhd)
3640 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003641}
3642
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003643static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003644 .resume = iommu_resume,
3645 .suspend = iommu_suspend,
3646};
3647
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003648static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003649{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003650 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651}
3652
3653#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003654static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003655#endif /* CONFIG_PM */
3656
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003657
Jiang Liuc2a0b532014-11-09 22:47:56 +08003658int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003659{
3660 struct acpi_dmar_reserved_memory *rmrr;
3661 struct dmar_rmrr_unit *rmrru;
3662
3663 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3664 if (!rmrru)
3665 return -ENOMEM;
3666
3667 rmrru->hdr = header;
3668 rmrr = (struct acpi_dmar_reserved_memory *)header;
3669 rmrru->base_address = rmrr->base_address;
3670 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003671 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3672 ((void *)rmrr) + rmrr->header.length,
3673 &rmrru->devices_cnt);
3674 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3675 kfree(rmrru);
3676 return -ENOMEM;
3677 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003678
Jiang Liu2e455282014-02-19 14:07:36 +08003679 list_add(&rmrru->list, &dmar_rmrr_units);
3680
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003681 return 0;
3682}
3683
Jiang Liu6b197242014-11-09 22:47:58 +08003684static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3685{
3686 struct dmar_atsr_unit *atsru;
3687 struct acpi_dmar_atsr *tmp;
3688
3689 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3690 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3691 if (atsr->segment != tmp->segment)
3692 continue;
3693 if (atsr->header.length != tmp->header.length)
3694 continue;
3695 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3696 return atsru;
3697 }
3698
3699 return NULL;
3700}
3701
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If the buffer was allocated from slab by an ACPI _DSM method,
	 * it will be freed on return, so copy its contents rather than
	 * keeping a pointer into it.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

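/*
 * Remove the ATSR unit matching @hdr, if any.  synchronize_rcu()
 * makes sure lockless walkers of dmar_atsr_units are done before the
 * unit is freed.
 */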
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

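/*
 * Check whether the ATSR unit described by @hdr may be released:
 * refuse with -EBUSY while any device in its scope is still present.
 */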
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

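/*
 * Bring a hot-added DMAR unit online: check that its capabilities are
 * compatible with what the running configuration already relies on
 * (pass-through, snooping, superpages), set up domain bookkeeping and
 * the root entry, wire up queued invalidation and the fault interrupt,
 * then enable translation.
 */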
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("IOMMU: %s doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("IOMMU: %s doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * We always have to disable PMRs or DMA may fail on
		 * this device.
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

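/*
 * DMAR unit hotplug entry point: @insert selects between bringing the
 * unit online via intel_iommu_add() and tearing it down again.
 * Nothing happens unless the driver is already enabled.
 */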
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

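/*
 * Decide whether ATS is usable for @dev: walk up to the PCIe root
 * port and return nonzero if that port appears in the device scope of
 * a registered ATSR unit (or the unit covers all ports on the
 * segment).
 */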
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

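/*
 * PCI bus notifier callback: keep the cached RMRR and ATSR device
 * scopes in sync as devices are added and removed, so later lookups
 * see an up-to-date mapping of units to devices.
 */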
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * We only respond here to the removal of a device; a newly added
 * device is not attached to its DMAR domain yet.  That happens lazily,
 * when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

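/*
 * When hardware pass-through is off, the static identity (si) domain
 * has to follow memory hotplug: ranges going online are identity-
 * mapped, and offlined ranges are unmapped, flushed from the IOTLB of
 * every active IOMMU, and their iova and page-table memory freed.
 */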
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

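/*
 * Read-only sysfs attributes exposing the raw architectural registers
 * of each DMAR unit.  Via the iommu_device_create() call in
 * intel_iommu_init() below, these should appear under
 * /sys/class/iommu/<iommu name>/intel-iommu/.
 */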
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

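/*
 * Main entry point: parse the DMAR table and device scopes, initialize
 * the DMAR units, install the Intel DMA ops plus the bus and memory
 * notifiers, and register each IOMMU with sysfs.  Error paths unwind
 * in the reverse order of setup under dmar_global_lock.
 */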
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * and update the iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

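/*
 * Set up a domain allocated through the external IOMMU API (as opposed
 * to one created internally for DMA-API use): iova allocator, reserved
 * ranges, address-width (AGAW) calculation and the top-level page
 * directory.
 */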
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}

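/*
 * Usage sketch for the ops below (illustrative only, not part of this
 * driver): roughly how a ~3.19-era consumer such as VFIO or KVM
 * reaches intel_iommu_attach_device()/intel_iommu_map() through the
 * generic IOMMU API.  "dev", "pg" and the iova are hypothetical
 * placeholders.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, dev))	// -> intel_iommu_attach_device
 *		goto err_free;
 *	// map one 4KiB page at IOVA 0x100000 for DMA reads and writes
 *	if (iommu_map(dom, 0x100000, page_to_phys(pg), PAGE_SIZE,
 *		      IOMMU_READ | IOMMU_WRITE))	// -> intel_iommu_map
 *		goto err_detach;
 *	...
 *	iommu_unmap(dom, 0x100000, PAGE_SIZE);	// -> intel_iommu_unmap
 * err_detach:
 *	iommu_detach_device(dom, dev);
 * err_free:
 *	iommu_domain_free(dom);
 */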
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/*
	 * Round up size to next multiple of PAGE_SIZE, if it and
	 * the low bits of hpa would take us onto the next page.
	 */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/*
	 * Cope with horrid API which requires us to unmap more than the
	 * size argument if it happens to be a large-page mapping.
	 */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

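/*
 * GGC is the graphics control register on Calpella/Ironlake-era host
 * bridges.  Judging by the field names, it reports how much stolen
 * memory the BIOS set aside for the GTT and whether any of it is
 * usable for VT-d (the "_VT" variants).
 */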
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/*
 * On Tylersburg chipsets, some BIOSes have been known to enable the
 * ISOCH DMAR unit for the Azalia sound device, but not give it any
 * TLB entries, which causes it to deadlock.  Check for that.  We do
 * this in a function called from init_dmars(), instead of in a PCI
 * quirk, because we don't want to print the obnoxious "BIOS broken"
 * message if VT-d is actually disabled.
 */
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/*
	 * System Management Registers. Might be hidden, in which case
	 * we can't do the sanity check. But that's OK, because the
	 * known-broken BIOSes _don't_ actually hide it, so far.
	 */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}